From 7d15399a4d18ae2061ddb01656d85dbc940ff915 Mon Sep 17 00:00:00 2001
From: khizmax
Date: Thu, 18 Sep 2014 14:38:14 +0400
Subject: [PATCH] Move libcds 1.6.0 from SVN

---
 brush_cds.pl | 66 + build/Makefile | 272 + build/build.sh | 555 ++ build/sample/build-freebsd-amd64.sh | 13 + build/sample/build-hpux1123.sh | 19 + build/sample/build-hpux1131.sh | 19 + build/sample/build-linux-amd64.sh | 18 + build/sample/build-linux-ia64.sh | 17 + build/sample/build-linux-sparc.sh | 14 + build/sample/build-linux-x86.sh | 17 + build/sample/build-mingw-amd64.bat | 14 + build/sample/build-osx-clang-libc++.sh | 16 + build/sample/build-osx-gcc.sh | 16 + build/sample/build-sun-sparc.sh | 21 + cds/algo/base.h | 15 + cds/algo/elimination.h | 58 + cds/algo/elimination_opt.h | 37 + cds/algo/elimination_tls.h | 34 + cds/algo/flat_combining.h | 808 ++ cds/backoff_strategy.h | 426 ++ cds/bitop.h | 140 + cds/compiler/backoff.h | 32 + cds/compiler/bitop.h | 40 + cds/compiler/clang/cxx11_atomic_prepatches.h | 54 + cds/compiler/clang/defs.h | 128 + cds/compiler/cstdint_boost.h | 44 + cds/compiler/cstdint_std.h | 43 + cds/compiler/cxx11_atomic.h | 2277 ++++++ cds/compiler/cxx11_atomic_patches.h | 13 + cds/compiler/cxx11_atomic_prepatches.h | 10 + cds/compiler/defs.h | 28 + cds/compiler/gcc/amd64/backoff.h | 39 + cds/compiler/gcc/amd64/bitop.h | 157 + cds/compiler/gcc/amd64/cxx11_atomic.h | 207 + cds/compiler/gcc/compiler_barriers.h | 10 + cds/compiler/gcc/compiler_macro.h | 136 + cds/compiler/gcc/cxx11_atomic_patches.h | 47 + cds/compiler/gcc/defs.h | 143 + cds/compiler/gcc/ia64/backoff.h | 37 + cds/compiler/gcc/ia64/bitop.h | 62 + cds/compiler/gcc/ia64/cxx11_atomic.h | 653 ++ cds/compiler/gcc/ppc64/backoff.h | 19 + cds/compiler/gcc/ppc64/bitop.h | 17 + cds/compiler/gcc/sparc/backoff.h | 30 + cds/compiler/gcc/sparc/bitop.h | 42 + cds/compiler/gcc/sparc/cxx11_atomic.h | 610 ++ cds/compiler/gcc/x86/backoff.h | 39 + cds/compiler/gcc/x86/bitop.h | 86 + cds/compiler/gcc/x86/cxx11_atomic.h | 184 + cds/compiler/gcc/x86/cxx11_atomic32.h | 474 ++ cds/compiler/icl/compiler_barriers.h | 30 + cds/compiler/icl/cxx11_atomic_patches_win.h | 27 + cds/compiler/icl/defs.h | 207 + cds/compiler/vc/amd64/backoff.h | 32 + cds/compiler/vc/amd64/bitop.h | 126 + cds/compiler/vc/amd64/cxx11_atomic.h | 584 ++ cds/compiler/vc/compiler_barriers.h | 29 + cds/compiler/vc/defs.h | 196 + cds/compiler/vc/x86/backoff.h | 35 + cds/compiler/vc/x86/bitop.h | 83 + cds/compiler/vc/x86/cxx11_atomic.h | 556 ++ cds/container/base.h | 60 + cds/container/basket_queue.h | 393 + cds/container/cuckoo_base.h | 211 + cds/container/cuckoo_map.h | 906 +++ cds/container/cuckoo_set.h | 987 +++ cds/container/details/guarded_ptr_cast.h | 30 + cds/container/details/make_lazy_kvlist.h | 89 + cds/container/details/make_lazy_list.h | 79 + cds/container/details/make_michael_kvlist.h | 89 + cds/container/details/make_michael_list.h | 82 + cds/container/details/make_skip_list_map.h | 131 + cds/container/details/make_skip_list_set.h | 94 + cds/container/details/make_split_list_set.h | 239 + cds/container/ellen_bintree_base.h | 374 + cds/container/ellen_bintree_map_hp.h | 9 + cds/container/ellen_bintree_map_impl.h | 677 ++ cds/container/ellen_bintree_map_ptb.h | 9 + cds/container/ellen_bintree_map_rcu.h | 704 ++ cds/container/ellen_bintree_set_hp.h | 9 + cds/container/ellen_bintree_set_impl.h | 753 ++ cds/container/ellen_bintree_set_ptb.h | 9 + cds/container/ellen_bintree_set_rcu.h | 785 ++ cds/container/fcdeque.h | 481 ++ cds/container/fcpriority_queue.h | 293 + 
cds/container/fcqueue.h | 393 + cds/container/fcstack.h | 369 + cds/container/lazy_kvlist_hp.h | 11 + cds/container/lazy_kvlist_hrc.h | 11 + cds/container/lazy_kvlist_impl.h | 926 +++ cds/container/lazy_kvlist_nogc.h | 621 ++ cds/container/lazy_kvlist_ptb.h | 11 + cds/container/lazy_kvlist_rcu.h | 947 +++ cds/container/lazy_list_base.h | 127 + cds/container/lazy_list_hp.h | 11 + cds/container/lazy_list_hrc.h | 11 + cds/container/lazy_list_impl.h | 963 +++ cds/container/lazy_list_nogc.h | 467 ++ cds/container/lazy_list_ptb.h | 11 + cds/container/lazy_list_rcu.h | 997 +++ cds/container/michael_deque.h | 493 ++ cds/container/michael_kvlist_hp.h | 11 + cds/container/michael_kvlist_hrc.h | 11 + cds/container/michael_kvlist_impl.h | 911 +++ cds/container/michael_kvlist_nogc.h | 602 ++ cds/container/michael_kvlist_ptb.h | 11 + cds/container/michael_kvlist_rcu.h | 929 +++ cds/container/michael_list_base.h | 112 + cds/container/michael_list_hp.h | 11 + cds/container/michael_list_hrc.h | 11 + cds/container/michael_list_impl.h | 935 +++ cds/container/michael_list_nogc.h | 450 ++ cds/container/michael_list_ptb.h | 11 + cds/container/michael_list_rcu.h | 967 +++ cds/container/michael_map.h | 820 ++ cds/container/michael_map_base.h | 36 + cds/container/michael_map_nogc.h | 503 ++ cds/container/michael_map_rcu.h | 781 ++ cds/container/michael_set.h | 772 ++ cds/container/michael_set_base.h | 44 + cds/container/michael_set_nogc.h | 329 + cds/container/michael_set_rcu.h | 754 ++ cds/container/moir_queue.h | 339 + cds/container/mspriority_queue.h | 337 + cds/container/msqueue.h | 345 + cds/container/optimistic_queue.h | 354 + cds/container/rwqueue.h | 362 + cds/container/segmented_queue.h | 409 + cds/container/skip_list_base.h | 299 + cds/container/skip_list_map_hp.h | 11 + cds/container/skip_list_map_hrc.h | 11 + cds/container/skip_list_map_impl.h | 803 ++ cds/container/skip_list_map_nogc.h | 386 + cds/container/skip_list_map_ptb.h | 11 + cds/container/skip_list_map_rcu.h | 807 ++ cds/container/skip_list_set_hp.h | 11 + cds/container/skip_list_set_hrc.h | 11 + cds/container/skip_list_set_impl.h | 843 +++ cds/container/skip_list_set_nogc.h | 423 ++ cds/container/skip_list_set_ptb.h | 11 + cds/container/skip_list_set_rcu.h | 883 +++ cds/container/split_list_base.h | 176 + cds/container/split_list_map.h | 660 ++ cds/container/split_list_map_nogc.h | 289 + cds/container/split_list_map_rcu.h | 725 ++ cds/container/split_list_set.h | 892 +++ cds/container/split_list_set_nogc.h | 360 + cds/container/split_list_set_rcu.h | 966 +++ cds/container/striped_map.h | 1000 +++ cds/container/striped_map/boost_flat_map.h | 58 + cds/container/striped_map/boost_list.h | 273 + cds/container/striped_map/boost_map.h | 54 + cds/container/striped_map/boost_slist.h | 285 + .../striped_map/boost_unordered_map.h | 50 + cds/container/striped_map/std_hash_map.h | 13 + cds/container/striped_map/std_hash_map_std.h | 195 + cds/container/striped_map/std_hash_map_vc.h | 182 + cds/container/striped_map/std_list.h | 301 + cds/container/striped_map/std_map.h | 190 + cds/container/striped_set.h | 1009 +++ cds/container/striped_set/adapter.h | 491 ++ cds/container/striped_set/boost_flat_set.h | 57 + cds/container/striped_set/boost_list.h | 266 + cds/container/striped_set/boost_set.h | 56 + cds/container/striped_set/boost_slist.h | 277 + .../striped_set/boost_stable_vector.h | 354 + .../striped_set/boost_unordered_set.h | 47 + cds/container/striped_set/boost_vector.h | 261 + cds/container/striped_set/std_hash_set.h | 13 + 
cds/container/striped_set/std_hash_set_std.h | 178 + cds/container/striped_set/std_hash_set_vc.h | 166 + cds/container/striped_set/std_list.h | 302 + cds/container/striped_set/std_set.h | 175 + cds/container/striped_set/std_vector.h | 263 + cds/container/treiber_stack.h | 297 + cds/container/tsigas_cycle_queue.h | 341 + cds/container/vyukov_mpmc_cycle_queue.h | 392 + cds/cxx11_atomic.h | 378 + cds/details/aligned_allocator.h | 158 + cds/details/aligned_type.h | 80 + cds/details/allocator.h | 307 + cds/details/binary_functor_wrapper.h | 62 + cds/details/bit_reverse_counter.h | 73 + cds/details/bitop_generic.h | 271 + cds/details/bounded_array.h | 231 + cds/details/bounded_container.h | 15 + cds/details/comparator.h | 61 + cds/details/cxx11_features.h | 26 + cds/details/defs.h | 539 ++ cds/details/functor_wrapper.h | 152 + cds/details/hash_functor_selector.h | 51 + cds/details/is_aligned.h | 36 + cds/details/lib.h | 28 + cds/details/make_const_type.h | 30 + cds/details/marked_ptr.h | 374 + cds/details/noncopyable.h | 15 + cds/details/static_functor.h | 21 + cds/details/std/chrono.h | 24 + cds/details/std/condition_variable.h | 26 + cds/details/std/memory.h | 108 + cds/details/std/mutex.h | 39 + cds/details/std/thread.h | 26 + cds/details/std/tuple.h | 30 + cds/details/std/type_traits.h | 89 + cds/details/trivial_assign.h | 22 + cds/details/type_padding.h | 56 + cds/details/void_selector.h | 27 + cds/gc/all.h | 10 + cds/gc/default_gc.h | 16 + cds/gc/details/retired_ptr.h | 92 + cds/gc/exception.h | 15 + cds/gc/gc_fwd.h | 19 + cds/gc/guarded_ptr.h | 218 + cds/gc/hp.h | 10 + cds/gc/hp_decl.h | 567 ++ cds/gc/hp_impl.h | 57 + cds/gc/hrc.h | 10 + cds/gc/hrc/details/hrc_fwd.h | 16 + cds/gc/hrc/details/hrc_inline.h | 64 + cds/gc/hrc/details/hrc_retired.h | 193 + cds/gc/hrc/gc_fwd.h | 15 + cds/gc/hrc/hrc.h | 690 ++ cds/gc/hrc_decl.h | 840 +++ cds/gc/hrc_impl.h | 57 + cds/gc/hzp/details/hp_alloc.h | 322 + cds/gc/hzp/details/hp_fwd.h | 15 + cds/gc/hzp/details/hp_inline.h | 26 + cds/gc/hzp/details/hp_retired.h | 86 + cds/gc/hzp/details/hp_type.h | 23 + cds/gc/hzp/hzp.h | 656 ++ cds/gc/nogc.h | 29 + cds/gc/ptb.h | 10 + cds/gc/ptb/ptb.h | 1027 +++ cds/gc/ptb_decl.h | 478 ++ cds/gc/ptb_impl.h | 43 + cds/init.h | 89 + cds/int_algo.h | 74 + cds/intrusive/base.h | 160 + cds/intrusive/basket_queue.h | 813 ++ cds/intrusive/cuckoo_set.h | 2878 +++++++ cds/intrusive/deque_stat.h | 82 + cds/intrusive/details/dummy_node_holder.h | 67 + cds/intrusive/details/ellen_bintree_base.h | 688 ++ cds/intrusive/ellen_bintree_hp.h | 9 + cds/intrusive/ellen_bintree_impl.h | 1639 ++++ cds/intrusive/ellen_bintree_ptb.h | 9 + cds/intrusive/ellen_bintree_rcu.h | 2071 ++++++ cds/intrusive/fcqueue.h | 345 + cds/intrusive/fcstack.h | 329 + cds/intrusive/lazy_list_base.h | 320 + cds/intrusive/lazy_list_hp.h | 9 + cds/intrusive/lazy_list_hrc.h | 152 + cds/intrusive/lazy_list_impl.h | 1234 +++ cds/intrusive/lazy_list_nogc.h | 681 ++ cds/intrusive/lazy_list_ptb.h | 9 + cds/intrusive/lazy_list_rcu.h | 1227 +++ cds/intrusive/michael_deque.h | 993 +++ cds/intrusive/michael_list_base.h | 263 + cds/intrusive/michael_list_hp.h | 9 + cds/intrusive/michael_list_hrc.h | 76 + cds/intrusive/michael_list_impl.h | 1174 +++ cds/intrusive/michael_list_nogc.h | 644 ++ cds/intrusive/michael_list_ptb.h | 9 + cds/intrusive/michael_list_rcu.h | 1080 +++ cds/intrusive/michael_set.h | 811 ++ cds/intrusive/michael_set_base.h | 207 + cds/intrusive/michael_set_nogc.h | 386 + cds/intrusive/michael_set_rcu.h | 681 ++ cds/intrusive/moir_queue.h | 173 + 
cds/intrusive/mspriority_queue.h | 515 ++ cds/intrusive/msqueue.h | 427 ++ cds/intrusive/node_traits.h | 166 + cds/intrusive/optimistic_queue.h | 617 ++ cds/intrusive/options.h | 161 + cds/intrusive/queue_stat.h | 93 + cds/intrusive/segmented_queue.h | 681 ++ cds/intrusive/single_link_struct.h | 227 + cds/intrusive/skip_list_base.h | 654 ++ cds/intrusive/skip_list_hp.h | 9 + cds/intrusive/skip_list_hrc.h | 201 + cds/intrusive/skip_list_impl.h | 1749 +++++ cds/intrusive/skip_list_nogc.h | 1034 +++ cds/intrusive/skip_list_ptb.h | 9 + cds/intrusive/skip_list_rcu.h | 2227 ++++++ cds/intrusive/split_list.h | 1115 +++ cds/intrusive/split_list_base.h | 824 ++ cds/intrusive/split_list_nogc.h | 624 ++ cds/intrusive/split_list_rcu.h | 1011 +++ cds/intrusive/striped_set.h | 894 +++ cds/intrusive/striped_set/adapter.h | 371 + cds/intrusive/striped_set/boost_avl_set.h | 25 + cds/intrusive/striped_set/boost_list.h | 209 + cds/intrusive/striped_set/boost_set.h | 25 + cds/intrusive/striped_set/boost_sg_set.h | 25 + cds/intrusive/striped_set/boost_slist.h | 226 + cds/intrusive/striped_set/boost_splay_set.h | 25 + cds/intrusive/striped_set/boost_treap_set.h | 25 + .../striped_set/boost_unordered_set.h | 192 + cds/intrusive/striped_set/resizing_policy.h | 189 + cds/intrusive/striped_set/striping_policy.h | 360 + cds/intrusive/treiber_stack.h | 696 ++ cds/intrusive/tsigas_cycle_queue.h | 370 + cds/intrusive/vyukov_mpmc_cycle_queue.h | 176 + cds/lock/array.h | 324 + cds/lock/scoped_lock.h | 72 + cds/lock/spinlock.h | 422 ++ cds/memory/michael/allocator.h | 1917 +++++ cds/memory/michael/bound_check.h | 153 + cds/memory/michael/options.h | 254 + cds/memory/michael/osalloc_stat.h | 111 + cds/memory/michael/procheap_stat.h | 392 + cds/memory/pool_allocator.h | 129 + cds/memory/vyukov_queue_pool.h | 481 ++ cds/numtraits.h | 247 + cds/opt/buffer.h | 243 + cds/opt/compare.h | 264 + cds/opt/hash.h | 571 ++ cds/opt/make_options_std.h | 535 ++ cds/opt/make_options_var.h | 398 + cds/opt/options.h | 713 ++ cds/opt/permutation.h | 293 + cds/opt/value_cleaner.h | 76 + cds/os/aix/alloc_aligned.h | 9 + cds/os/aix/timer.h | 92 + cds/os/aix/topology.h | 79 + cds/os/alloc_aligned.h | 188 + cds/os/details/fake_topology.h | 39 + cds/os/free_bsd/alloc_aligned.h | 9 + cds/os/free_bsd/timer.h | 96 + cds/os/free_bsd/topology.h | 78 + cds/os/hpux/alloc_aligned.h | 9 + cds/os/hpux/timer.h | 8 + cds/os/hpux/topology.h | 80 + cds/os/libc/alloc_aligned.h | 36 + cds/os/linux/alloc_aligned.h | 15 + cds/os/linux/timer.h | 90 + cds/os/linux/topology.h | 92 + cds/os/osx/timer.h | 103 + cds/os/osx/topology.h | 60 + cds/os/posix/alloc_aligned.h | 45 + cds/os/posix/fake_topology.h | 50 + cds/os/posix/syserror.h | 40 + cds/os/posix/thread.h | 55 + cds/os/posix/timer.h | 87 + cds/os/sunos/alloc_aligned.h | 9 + cds/os/sunos/timer.h | 71 + cds/os/sunos/topology.h | 60 + cds/os/syserror.h | 14 + cds/os/thread.h | 14 + cds/os/timer.h | 31 + cds/os/topology.h | 28 + cds/os/win/alloc_aligned.h | 36 + cds/os/win/syserror.h | 48 + cds/os/win/thread.h | 68 + cds/os/win/timer.h | 103 + cds/os/win/topology.h | 69 + cds/ref.h | 54 + cds/refcounter.h | 84 + cds/threading/details/_common.h | 278 + cds/threading/details/auto_detect.h | 45 + cds/threading/details/cxx11.h | 16 + cds/threading/details/cxx11_manager.h | 147 + cds/threading/details/gcc.h | 16 + cds/threading/details/gcc_manager.h | 147 + cds/threading/details/msvc.h | 16 + cds/threading/details/msvc_manager.h | 146 + cds/threading/details/pthread.h | 16 + cds/threading/details/pthread_manager.h | 242 + 
cds/threading/details/wintls.h | 17 + cds/threading/details/wintls_manager.h | 244 + cds/threading/model.h | 99 + cds/urcu/details/base.h | 482 ++ cds/urcu/details/check_deadlock.h | 41 + cds/urcu/details/gp.h | 101 + cds/urcu/details/gp_decl.h | 178 + cds/urcu/details/gpb.h | 222 + cds/urcu/details/gpi.h | 158 + cds/urcu/details/gpt.h | 225 + cds/urcu/details/sh.h | 169 + cds/urcu/details/sh_decl.h | 203 + cds/urcu/details/sig_buffered.h | 235 + cds/urcu/details/sig_threaded.h | 238 + cds/urcu/dispose_thread.h | 199 + cds/urcu/exempt_ptr.h | 216 + cds/urcu/general_buffered.h | 153 + cds/urcu/general_instant.h | 138 + cds/urcu/general_threaded.h | 160 + cds/urcu/options.h | 66 + cds/urcu/signal_buffered.h | 166 + cds/urcu/signal_threaded.h | 174 + cds/user_setup/allocator.h | 42 + cds/user_setup/cache_line.h | 30 + cds/user_setup/threading.h | 24 + cds/version.h | 18 + change.log | 230 + doxygen/cds.doxy | 1758 +++++ doxygen/doxygen.log | 15 + doxygen/footer.html | 9 + doxygen/index.html | 10 + license.txt | 21 + make_distrib.pl | 187 + make_docs.bat | 6 + projects/Win/build-msbuild.cmd | 14 + projects/Win/build-vc10.cmd | 2 + projects/Win/build-vc11.cmd | 2 + projects/Win/build-vc12.cmd | 2 + projects/Win/build-vc9.cmd | 9 + projects/Win/vc10/cds.sln | 618 ++ projects/Win/vc10/cds.vcxproj | 1047 +++ projects/Win/vc10/cds.vcxproj.filters | 1338 ++++ projects/Win/vc10/hdr-test-deque.vcxproj | 555 ++ projects/Win/vc10/hdr-test-map.vcxproj | 619 ++ .../Win/vc10/hdr-test-map.vcxproj.filters | 233 + projects/Win/vc10/hdr-test-misc.vcxproj | 558 ++ .../Win/vc10/hdr-test-ordered-list.vcxproj | 608 ++ .../hdr-test-ordered-list.vcxproj.filters | 195 + .../Win/vc10/hdr-test-priority_queue.vcxproj | 542 ++ .../hdr-test-priority_queue.vcxproj.filters | 43 + projects/Win/vc10/hdr-test-queue.vcxproj | 587 ++ .../Win/vc10/hdr-test-queue.vcxproj.filters | 132 + projects/Win/vc10/hdr-test-set.vcxproj | 652 ++ .../Win/vc10/hdr-test-set.vcxproj.filters | 345 + projects/Win/vc10/hdr-test-stack.vcxproj | 564 ++ .../Win/vc10/hdr-test-striped-set.vcxproj | 597 ++ .../vc10/hdr-test-striped-set.vcxproj.filters | 174 + projects/Win/vc10/hdr-test-tree.vcxproj | 567 ++ .../Win/vc10/hdr-test-tree.vcxproj.filters | 118 + projects/Win/vc10/unit-map-delodd.vcxproj | 488 ++ projects/Win/vc10/unit-map-find.vcxproj | 490 ++ projects/Win/vc10/unit-map-insdel.vcxproj | 493 ++ projects/Win/vc10/unit-misc.vcxproj | 493 ++ projects/Win/vc10/unit-misc.vcxproj.filters | 45 + projects/Win/vc10/unit-pqueue.vcxproj | 494 ++ projects/Win/vc10/unit-prerequisites.vcxproj | 558 ++ projects/Win/vc10/unit-queue.vcxproj | 497 ++ projects/Win/vc10/unit-set-delodd.vcxproj | 487 ++ projects/Win/vc10/unit-set-insdel.vcxproj | 498 ++ projects/Win/vc10/unit-stack.vcxproj | 492 ++ projects/Win/vc11/cds.sln | 618 ++ projects/Win/vc11/cds.vcxproj | 1011 +++ projects/Win/vc11/cds.vcxproj.filters | 1332 ++++ projects/Win/vc11/hdr-test-deque.vcxproj | 552 ++ projects/Win/vc11/hdr-test-map.vcxproj | 610 ++ .../Win/vc11/hdr-test-map.vcxproj.filters | 233 + projects/Win/vc11/hdr-test-misc.vcxproj | 550 ++ .../Win/vc11/hdr-test-ordered-list.vcxproj | 599 ++ .../hdr-test-ordered-list.vcxproj.filters | 195 + .../Win/vc11/hdr-test-priority_queue.vcxproj | 549 ++ .../hdr-test-priority_queue.vcxproj.filters | 43 + projects/Win/vc11/hdr-test-queue.vcxproj | 578 ++ .../Win/vc11/hdr-test-queue.vcxproj.filters | 132 + projects/Win/vc11/hdr-test-set.vcxproj | 643 ++ .../Win/vc11/hdr-test-set.vcxproj.filters | 345 + projects/Win/vc11/hdr-test-stack.vcxproj | 555 ++ 
.../Win/vc11/hdr-test-striped-set.vcxproj | 588 ++ .../vc11/hdr-test-striped-set.vcxproj.filters | 174 + projects/Win/vc11/hdr-test-tree.vcxproj | 574 ++ .../Win/vc11/hdr-test-tree.vcxproj.filters | 118 + projects/Win/vc11/unit-map-delodd.vcxproj | 482 ++ projects/Win/vc11/unit-map-find.vcxproj | 484 ++ projects/Win/vc11/unit-map-insdel.vcxproj | 486 ++ projects/Win/vc11/unit-misc.vcxproj | 493 ++ projects/Win/vc11/unit-misc.vcxproj.filters | 45 + projects/Win/vc11/unit-pqueue.vcxproj | 492 ++ projects/Win/vc11/unit-prerequisites.vcxproj | 562 ++ projects/Win/vc11/unit-queue.vcxproj | 493 ++ projects/Win/vc11/unit-set-delodd.vcxproj | 482 ++ projects/Win/vc11/unit-set-insdel.vcxproj | 493 ++ projects/Win/vc11/unit-stack.vcxproj | 490 ++ projects/Win/vc12/cds.sln | 619 ++ projects/Win/vc12/cds.vcxproj | 1018 +++ projects/Win/vc12/cds.vcxproj.filters | 1329 ++++ projects/Win/vc12/hdr-test-deque.vcxproj | 561 ++ projects/Win/vc12/hdr-test-map.vcxproj | 615 ++ .../Win/vc12/hdr-test-map.vcxproj.filters | 233 + projects/Win/vc12/hdr-test-misc.vcxproj | 555 ++ .../Win/vc12/hdr-test-ordered-list.vcxproj | 604 ++ .../hdr-test-ordered-list.vcxproj.filters | 195 + .../Win/vc12/hdr-test-priority_queue.vcxproj | 554 ++ .../hdr-test-priority_queue.vcxproj.filters | 43 + projects/Win/vc12/hdr-test-queue.vcxproj | 583 ++ .../Win/vc12/hdr-test-queue.vcxproj.filters | 132 + projects/Win/vc12/hdr-test-set.vcxproj | 648 ++ .../Win/vc12/hdr-test-set.vcxproj.filters | 345 + projects/Win/vc12/hdr-test-stack.vcxproj | 560 ++ .../Win/vc12/hdr-test-striped-set.vcxproj | 593 ++ .../vc12/hdr-test-striped-set.vcxproj.filters | 174 + projects/Win/vc12/hdr-test-tree.vcxproj | 579 ++ .../Win/vc12/hdr-test-tree.vcxproj.filters | 118 + projects/Win/vc12/unit-map-delodd.vcxproj | 488 ++ projects/Win/vc12/unit-map-find.vcxproj | 490 ++ projects/Win/vc12/unit-map-insdel.vcxproj | 493 ++ projects/Win/vc12/unit-misc.vcxproj | 499 ++ projects/Win/vc12/unit-misc.vcxproj.filters | 45 + projects/Win/vc12/unit-pqueue.vcxproj | 498 ++ projects/Win/vc12/unit-prerequisites.vcxproj | 568 ++ projects/Win/vc12/unit-queue.vcxproj | 499 ++ projects/Win/vc12/unit-set-delodd.vcxproj | 488 ++ projects/Win/vc12/unit-set-insdel.vcxproj | 499 ++ projects/Win/vc12/unit-stack.vcxproj | 496 ++ projects/Win/vc9/cds.ncb | Bin 0 -> 45001728 bytes projects/Win/vc9/cds.sln | 446 ++ projects/Win/vc9/cds.suo | Bin 0 -> 58880 bytes projects/Win/vc9/cds.vcproj | 2332 ++++++ projects/Win/vc9/cds.vcproj.user.u.user | 177 + projects/Win/vc9/hdr-test-deque.vcproj | 552 ++ .../Win/vc9/hdr-test-deque.vcproj.user.u.user | 177 + projects/Win/vc9/hdr-test-map.vcproj | 828 +++ .../Win/vc9/hdr-test-map.vcproj.user.u.user | 177 + projects/Win/vc9/hdr-test-misc.vcproj | 568 ++ .../Win/vc9/hdr-test-misc.vcproj.user.u.user | 177 + projects/Win/vc9/hdr-test-ordered-list.vcproj | 772 ++ .../hdr-test-ordered-list.vcproj.user.u.user | 177 + .../Win/vc9/hdr-test-priority_queue.vcproj | 572 ++ ...hdr-test-priority_queue.vcproj.user.u.user | 177 + .../Win/vc9/hdr-test-project-template.vcproj | 524 ++ projects/Win/vc9/hdr-test-queue.vcproj | 688 ++ .../Win/vc9/hdr-test-queue.vcproj.user.u.user | 177 + projects/Win/vc9/hdr-test-set.vcproj | 972 +++ .../Win/vc9/hdr-test-set.vcproj.user.u.user | 177 + projects/Win/vc9/hdr-test-stack.vcproj | 588 ++ .../Win/vc9/hdr-test-stack.vcproj.user.u.user | 177 + projects/Win/vc9/hdr-test-striped-set.vcproj | 744 ++ .../hdr-test-striped-set.vcproj.user.u.user | 177 + projects/Win/vc9/hdr-test-tree.vcproj | 672 ++ 
.../Win/vc9/hdr-test-tree.vcproj.user.u.user | 177 + projects/Win/vc9/unit-map-delodd.vcproj | 500 ++ .../vc9/unit-map-delodd.vcproj.user.u.user | 177 + projects/Win/vc9/unit-map-find.vcproj | 508 ++ .../Win/vc9/unit-map-find.vcproj.user.u.user | 177 + projects/Win/vc9/unit-map-insdel.vcproj | 520 ++ .../vc9/unit-map-insdel.vcproj.user.u.user | 177 + projects/Win/vc9/unit-misc.vcproj | 544 ++ projects/Win/vc9/unit-misc.vcproj.user.u.user | 177 + projects/Win/vc9/unit-pqueue.vcproj | 532 ++ .../Win/vc9/unit-pqueue.vcproj.user.u.user | 177 + projects/Win/vc9/unit-prerequisites.vcproj | 748 ++ .../vc9/unit-prerequisites.vcproj.user.u.user | 177 + projects/Win/vc9/unit-project-template.vcproj | 343 + projects/Win/vc9/unit-queue.vcproj | 536 ++ .../Win/vc9/unit-queue.vcproj.user.u.user | 177 + projects/Win/vc9/unit-set-delodd.vcproj | 500 ++ .../vc9/unit-set-delodd.vcproj.user.u.user | 177 + projects/Win/vc9/unit-set-insdel.vcproj | 536 ++ .../vc9/unit-set-insdel.vcproj.user.u.user | 177 + projects/Win/vc9/unit-stack.vcproj | 524 ++ .../Win/vc9/unit-stack.vcproj.user.u.user | 177 + projects/android/jni/Android.mk | 185 + projects/android/jni/Application.mk | 10 + projects/android/jni/build.sh | 3 + projects/source.libcds.mk | 11 + projects/source.test-common.mk | 6 + projects/source.test-hdr.mk | 303 + projects/source.test-hdr.offsetof.mk | 103 + projects/source.unit.map.mk | 12 + projects/source.unit.misc.mk | 8 + projects/source.unit.pqueue.mk | 5 + projects/source.unit.queue.mk | 7 + projects/source.unit.set.mk | 12 + projects/source.unit.stack.mk | 6 + readme | 149 + scripts/tab2space.pl | 78 + src/dllmain.cpp | 219 + src/hrc_gc.cpp | 408 + src/hzp_const.h | 45 + src/hzp_gc.cpp | 364 + src/init.cpp | 65 + src/michael_heap.cpp | 327 + src/ptb_gc.cpp | 307 + src/topology_hpux.cpp | 79 + src/topology_linux.cpp | 45 + src/topology_osx.cpp | 25 + src/urcu_gp.cpp | 11 + src/urcu_sh.cpp | 13 + tests/cppunit/cppunit_mini.h | 385 + tests/cppunit/cppunit_proxy.h | 34 + tests/cppunit/file_reporter.h | 145 + tests/cppunit/test_beans.h | 29 + tests/cppunit/test_main.cpp | 484 ++ tests/cppunit/thread.cpp | 126 + tests/cppunit/thread.h | 133 + tests/data/split.pl | 39 + tests/data/test-debug.conf | 196 + tests/data/test-express.conf | 194 + tests/data/test.conf | 189 + tests/data/text.txt | 6611 +++++++++++++++++ tests/test-hdr/deque/hdr_deque.h | 390 + tests/test-hdr/deque/hdr_fcdeque.cpp | 175 + tests/test-hdr/deque/hdr_intrusive_deque.h | 334 + .../deque/hdr_intrusive_michael_deque_hp.cpp | 166 + .../deque/hdr_intrusive_michael_deque_ptb.cpp | 142 + tests/test-hdr/deque/hdr_michael_deque_hp.cpp | 45 + .../test-hdr/deque/hdr_michael_deque_ptb.cpp | 43 + tests/test-hdr/map/hdr_cuckoo_map.cpp | 655 ++ tests/test-hdr/map/hdr_cuckoo_map.h | 395 + tests/test-hdr/map/hdr_map.h | 931 +++ tests/test-hdr/map/hdr_michael_map_hp.cpp | 90 + tests/test-hdr/map/hdr_michael_map_hrc.cpp | 87 + .../test-hdr/map/hdr_michael_map_lazy_hp.cpp | 87 + .../test-hdr/map/hdr_michael_map_lazy_hrc.cpp | 87 + .../map/hdr_michael_map_lazy_nogc.cpp | 85 + .../test-hdr/map/hdr_michael_map_lazy_ptb.cpp | 87 + .../map/hdr_michael_map_lazy_rcu_gpb.cpp | 90 + .../map/hdr_michael_map_lazy_rcu_gpi.cpp | 90 + .../map/hdr_michael_map_lazy_rcu_gpt.cpp | 90 + .../map/hdr_michael_map_lazy_rcu_shb.cpp | 96 + .../map/hdr_michael_map_lazy_rcu_sht.cpp | 96 + tests/test-hdr/map/hdr_michael_map_nogc.cpp | 85 + tests/test-hdr/map/hdr_michael_map_ptb.cpp | 87 + .../test-hdr/map/hdr_michael_map_rcu_gpb.cpp | 90 + 
.../test-hdr/map/hdr_michael_map_rcu_gpi.cpp | 90 + .../test-hdr/map/hdr_michael_map_rcu_gpt.cpp | 90 + .../test-hdr/map/hdr_michael_map_rcu_shb.cpp | 96 + .../test-hdr/map/hdr_michael_map_rcu_sht.cpp | 96 + .../hdr_refinable_hashmap_boost_flat_map.cpp | 178 + .../map/hdr_refinable_hashmap_boost_list.cpp | 163 + .../map/hdr_refinable_hashmap_boost_map.cpp | 167 + ..._refinable_hashmap_boost_unordered_map.cpp | 146 + .../map/hdr_refinable_hashmap_hashmap_std.cpp | 148 + .../map/hdr_refinable_hashmap_hashmap_vc.cpp | 147 + .../map/hdr_refinable_hashmap_list.cpp | 139 + .../map/hdr_refinable_hashmap_map.cpp | 145 + .../map/hdr_refinable_hashmap_slist.cpp | 163 + tests/test-hdr/map/hdr_skiplist_map.h | 581 ++ tests/test-hdr/map/hdr_skiplist_map_hp.cpp | 313 + tests/test-hdr/map/hdr_skiplist_map_hrc.cpp | 311 + tests/test-hdr/map/hdr_skiplist_map_nogc.cpp | 311 + tests/test-hdr/map/hdr_skiplist_map_ptb.cpp | 311 + tests/test-hdr/map/hdr_skiplist_map_rcu.h | 507 ++ .../test-hdr/map/hdr_skiplist_map_rcu_gpb.cpp | 317 + .../test-hdr/map/hdr_skiplist_map_rcu_gpi.cpp | 319 + .../test-hdr/map/hdr_skiplist_map_rcu_gpt.cpp | 317 + .../test-hdr/map/hdr_skiplist_map_rcu_shb.cpp | 368 + .../test-hdr/map/hdr_skiplist_map_rcu_sht.cpp | 368 + tests/test-hdr/map/hdr_splitlist_map_hp.cpp | 131 + tests/test-hdr/map/hdr_splitlist_map_hrc.cpp | 131 + .../map/hdr_splitlist_map_lazy_hp.cpp | 131 + .../map/hdr_splitlist_map_lazy_hrc.cpp | 131 + .../map/hdr_splitlist_map_lazy_nogc.cpp | 131 + .../map/hdr_splitlist_map_lazy_ptb.cpp | 131 + .../map/hdr_splitlist_map_lazy_rcu_gpb.cpp | 134 + .../map/hdr_splitlist_map_lazy_rcu_gpi.cpp | 134 + .../map/hdr_splitlist_map_lazy_rcu_gpt.cpp | 134 + .../map/hdr_splitlist_map_lazy_rcu_shb.cpp | 140 + .../map/hdr_splitlist_map_lazy_rcu_sht.cpp | 140 + tests/test-hdr/map/hdr_splitlist_map_nogc.cpp | 131 + tests/test-hdr/map/hdr_splitlist_map_ptb.cpp | 131 + .../map/hdr_splitlist_map_rcu_gpb.cpp | 134 + .../map/hdr_splitlist_map_rcu_gpi.cpp | 134 + .../map/hdr_splitlist_map_rcu_gpt.cpp | 134 + .../map/hdr_splitlist_map_rcu_shb.cpp | 140 + .../map/hdr_splitlist_map_rcu_sht.cpp | 140 + .../hdr_striped_hashmap_boost_flat_map.cpp | 169 + .../map/hdr_striped_hashmap_boost_list.cpp | 154 + .../map/hdr_striped_hashmap_boost_map.cpp | 157 + ...dr_striped_hashmap_boost_unordered_map.cpp | 134 + .../map/hdr_striped_hashmap_hashmap_std.cpp | 138 + .../map/hdr_striped_hashmap_hashmap_vc.cpp | 138 + .../test-hdr/map/hdr_striped_hashmap_list.cpp | 131 + .../test-hdr/map/hdr_striped_hashmap_map.cpp | 134 + .../map/hdr_striped_hashmap_slist.cpp | 153 + tests/test-hdr/map/hdr_striped_map.h | 595 ++ tests/test-hdr/map/hdr_striped_map_reg.cpp | 5 + tests/test-hdr/map/print_skiplist_stat.h | 44 + tests/test-hdr/misc/allocator_test.cpp | 168 + tests/test-hdr/misc/bitop_st.cpp | 85 + tests/test-hdr/misc/cxx11_atomic_class.cpp | 778 ++ tests/test-hdr/misc/cxx11_atomic_func.cpp | 738 ++ .../misc/cxx11_convert_memory_order.h | 47 + tests/test-hdr/misc/find_option.cpp | 196 + tests/test-hdr/misc/hash_tuple.cpp | 131 + tests/test-hdr/misc/michael_allocator.cpp | 8 + tests/test-hdr/misc/michael_allocator.h | 197 + tests/test-hdr/misc/permutation_generator.cpp | 77 + tests/test-hdr/misc/thread_init_fini.cpp | 77 + .../ordered_list/hdr_intrusive_lazy.h | 887 +++ .../ordered_list/hdr_intrusive_lazy_hp.cpp | 130 + .../ordered_list/hdr_intrusive_lazy_hrc.cpp | 64 + .../ordered_list/hdr_intrusive_lazy_nogc.cpp | 129 + .../ordered_list/hdr_intrusive_lazy_ptb.cpp | 129 + .../hdr_intrusive_lazy_rcu_gpb.cpp | 132 + 
.../hdr_intrusive_lazy_rcu_gpi.cpp | 133 + .../hdr_intrusive_lazy_rcu_gpt.cpp | 133 + .../hdr_intrusive_lazy_rcu_shb.cpp | 158 + .../hdr_intrusive_lazy_rcu_sht.cpp | 151 + .../ordered_list/hdr_intrusive_michael.h | 887 +++ .../ordered_list/hdr_intrusive_michael_hp.cpp | 130 + .../hdr_intrusive_michael_hrc.cpp | 64 + .../hdr_intrusive_michael_list_rcu_gpb.cpp | 134 + .../hdr_intrusive_michael_list_rcu_gpi.cpp | 134 + .../hdr_intrusive_michael_list_rcu_gpt.cpp | 134 + .../hdr_intrusive_michael_list_rcu_shb.cpp | 152 + .../hdr_intrusive_michael_list_rcu_sht.cpp | 152 + .../hdr_intrusive_michael_nogc.cpp | 129 + .../hdr_intrusive_michael_ptb.cpp | 129 + tests/test-hdr/ordered_list/hdr_lazy.h | 774 ++ tests/test-hdr/ordered_list/hdr_lazy_hp.cpp | 102 + tests/test-hdr/ordered_list/hdr_lazy_hrc.cpp | 100 + tests/test-hdr/ordered_list/hdr_lazy_kv.h | 685 ++ .../test-hdr/ordered_list/hdr_lazy_kv_hp.cpp | 104 + .../test-hdr/ordered_list/hdr_lazy_kv_hrc.cpp | 100 + .../ordered_list/hdr_lazy_kv_nogc.cpp | 103 + .../test-hdr/ordered_list/hdr_lazy_kv_ptb.cpp | 100 + .../ordered_list/hdr_lazy_kv_rcu_gpb.cpp | 103 + .../ordered_list/hdr_lazy_kv_rcu_gpi.cpp | 103 + .../ordered_list/hdr_lazy_kv_rcu_gpt.cpp | 103 + .../ordered_list/hdr_lazy_kv_rcu_shb.cpp | 122 + .../ordered_list/hdr_lazy_kv_rcu_sht.cpp | 119 + tests/test-hdr/ordered_list/hdr_lazy_nogc.cpp | 100 + tests/test-hdr/ordered_list/hdr_lazy_ptb.cpp | 100 + .../ordered_list/hdr_lazy_rcu_gpb.cpp | 103 + .../ordered_list/hdr_lazy_rcu_gpi.cpp | 103 + .../ordered_list/hdr_lazy_rcu_gpt.cpp | 103 + .../ordered_list/hdr_lazy_rcu_shb.cpp | 119 + .../ordered_list/hdr_lazy_rcu_sht.cpp | 119 + tests/test-hdr/ordered_list/hdr_michael.h | 774 ++ .../test-hdr/ordered_list/hdr_michael_hp.cpp | 102 + .../test-hdr/ordered_list/hdr_michael_hrc.cpp | 100 + tests/test-hdr/ordered_list/hdr_michael_kv.h | 697 ++ .../ordered_list/hdr_michael_kv_hp.cpp | 104 + .../ordered_list/hdr_michael_kv_hrc.cpp | 100 + .../ordered_list/hdr_michael_kv_nogc.cpp | 103 + .../ordered_list/hdr_michael_kv_ptb.cpp | 100 + .../ordered_list/hdr_michael_kv_rcu_gpb.cpp | 102 + .../ordered_list/hdr_michael_kv_rcu_gpi.cpp | 102 + .../ordered_list/hdr_michael_kv_rcu_gpt.cpp | 102 + .../ordered_list/hdr_michael_kv_rcu_shb.cpp | 117 + .../ordered_list/hdr_michael_kv_rcu_sht.cpp | 117 + .../ordered_list/hdr_michael_nogc.cpp | 100 + .../test-hdr/ordered_list/hdr_michael_ptb.cpp | 100 + .../ordered_list/hdr_michael_rcu_gpb.cpp | 104 + .../ordered_list/hdr_michael_rcu_gpi.cpp | 104 + .../ordered_list/hdr_michael_rcu_gpt.cpp | 104 + .../ordered_list/hdr_michael_rcu_shb.cpp | 119 + .../ordered_list/hdr_michael_rcu_sht.cpp | 119 + .../hdr_fcpqueue_boost_stable_vector.cpp | 68 + .../priority_queue/hdr_fcpqueue_deque.cpp | 53 + .../priority_queue/hdr_fcpqueue_vector.cpp | 42 + .../hdr_intrusive_mspqueue_dyn.cpp | 80 + .../hdr_intrusive_mspqueue_static.cpp | 80 + .../priority_queue/hdr_intrusive_pqueue.h | 214 + .../priority_queue/hdr_mspqueue_dyn.cpp | 80 + .../priority_queue/hdr_mspqueue_static.cpp | 80 + tests/test-hdr/priority_queue/hdr_pqueue.h | 377 + .../priority_queue/hdr_priority_queue_reg.cpp | 8 + tests/test-hdr/queue/hdr_basketqueue_hrc.cpp | 107 + tests/test-hdr/queue/hdr_basketqueue_hzp.cpp | 107 + tests/test-hdr/queue/hdr_basketqueue_ptb.cpp | 107 + tests/test-hdr/queue/hdr_fcqueue.cpp | 83 + .../queue/hdr_intrusive_basketqueue_hp.cpp | 188 + .../queue/hdr_intrusive_basketqueue_hrc.cpp | 59 + .../queue/hdr_intrusive_basketqueue_node.h | 39 + .../queue/hdr_intrusive_basketqueue_ptb.cpp | 165 + 
.../test-hdr/queue/hdr_intrusive_fcqueue.cpp | 298 + .../queue/hdr_intrusive_moirqueue_hp.cpp | 186 + .../queue/hdr_intrusive_moirqueue_hrc.cpp | 59 + .../queue/hdr_intrusive_moirqueue_ptb.cpp | 168 + tests/test-hdr/queue/hdr_intrusive_msqueue.h | 519 ++ .../queue/hdr_intrusive_msqueue_hp.cpp | 191 + .../queue/hdr_intrusive_msqueue_hrc.cpp | 60 + .../queue/hdr_intrusive_msqueue_ptb.cpp | 167 + .../hdr_intrusive_optimisticqueue_hp.cpp | 211 + .../hdr_intrusive_optimisticqueue_ptb.cpp | 187 + .../queue/hdr_intrusive_segmented_queue.h | 208 + .../hdr_intrusive_segmented_queue_hp.cpp | 61 + .../hdr_intrusive_segmented_queue_ptb.cpp | 59 + .../queue/hdr_intrusive_singlelink_node.h | 37 + .../hdr_intrusive_tsigas_cycle_queue.cpp | 79 + .../hdr_intrusive_vyukovmpmc_cycle_queue.cpp | 79 + tests/test-hdr/queue/hdr_moirqueue_hrc.cpp | 107 + tests/test-hdr/queue/hdr_moirqueue_hzp.cpp | 107 + tests/test-hdr/queue/hdr_moirqueue_ptb.cpp | 107 + tests/test-hdr/queue/hdr_msqueue_hrc.cpp | 107 + tests/test-hdr/queue/hdr_msqueue_hzp.cpp | 107 + tests/test-hdr/queue/hdr_msqueue_ptb.cpp | 107 + tests/test-hdr/queue/hdr_optimistic_hzp.cpp | 107 + tests/test-hdr/queue/hdr_optimistic_ptb.cpp | 107 + tests/test-hdr/queue/hdr_rwqueue.cpp | 28 + tests/test-hdr/queue/hdr_segmented_queue.h | 242 + .../test-hdr/queue/hdr_segmented_queue_hp.cpp | 54 + .../queue/hdr_segmented_queue_ptb.cpp | 52 + .../test-hdr/queue/hdr_vyukov_mpmc_cyclic.cpp | 53 + tests/test-hdr/queue/queue_test_header.cpp | 4 + tests/test-hdr/queue/queue_test_header.h | 423 ++ tests/test-hdr/set/hdr_cuckoo_set.cpp | 630 ++ tests/test-hdr/set/hdr_cuckoo_set.h | 611 ++ .../hdr_intrusive_cuckoo_refinable_set.cpp | 646 ++ .../test-hdr/set/hdr_intrusive_cuckoo_set.cpp | 615 ++ tests/test-hdr/set/hdr_intrusive_cuckoo_set.h | 651 ++ .../set/hdr_intrusive_michael_set_hp.cpp | 150 + .../set/hdr_intrusive_michael_set_hp_lazy.cpp | 150 + .../set/hdr_intrusive_michael_set_hrc.cpp | 74 + .../hdr_intrusive_michael_set_hrc_lazy.cpp | 74 + .../set/hdr_intrusive_michael_set_nogc.cpp | 147 + .../hdr_intrusive_michael_set_nogc_lazy.cpp | 147 + .../set/hdr_intrusive_michael_set_ptb.cpp | 149 + .../hdr_intrusive_michael_set_ptb_lazy.cpp | 149 + .../set/hdr_intrusive_michael_set_rcu_gpb.cpp | 154 + ...hdr_intrusive_michael_set_rcu_gpb_lazy.cpp | 154 + .../set/hdr_intrusive_michael_set_rcu_gpi.cpp | 154 + ...hdr_intrusive_michael_set_rcu_gpi_lazy.cpp | 154 + .../set/hdr_intrusive_michael_set_rcu_gpt.cpp | 154 + ...hdr_intrusive_michael_set_rcu_gpt_lazy.cpp | 154 + .../set/hdr_intrusive_michael_set_rcu_shb.cpp | 168 + ...hdr_intrusive_michael_set_rcu_shb_lazy.cpp | 168 + .../set/hdr_intrusive_michael_set_rcu_sht.cpp | 168 + ...hdr_intrusive_michael_set_rcu_sht_lazy.cpp | 167 + ...hdr_intrusive_refinable_hashset_avlset.cpp | 101 + .../hdr_intrusive_refinable_hashset_list.cpp | 196 + .../hdr_intrusive_refinable_hashset_set.cpp | 101 + .../hdr_intrusive_refinable_hashset_sgset.cpp | 101 + .../hdr_intrusive_refinable_hashset_slist.cpp | 196 + ...r_intrusive_refinable_hashset_splayset.cpp | 101 + ...r_intrusive_refinable_hashset_treapset.cpp | 116 + .../hdr_intrusive_refinable_hashset_uset.cpp | 183 + tests/test-hdr/set/hdr_intrusive_set.h | 1748 +++++ .../set/hdr_intrusive_skiplist_hp.cpp | 329 + .../set/hdr_intrusive_skiplist_hp_member.cpp | 325 + .../set/hdr_intrusive_skiplist_hrc.cpp | 327 + .../set/hdr_intrusive_skiplist_nogc.cpp | 309 + .../hdr_intrusive_skiplist_nogc_member.cpp | 309 + .../set/hdr_intrusive_skiplist_ptb.cpp | 327 + 
.../set/hdr_intrusive_skiplist_ptb_member.cpp | 325 + .../set/hdr_intrusive_skiplist_rcu_gpb.cpp | 333 + .../hdr_intrusive_skiplist_rcu_gpb_member.cpp | 330 + .../set/hdr_intrusive_skiplist_rcu_gpi.cpp | 331 + .../hdr_intrusive_skiplist_rcu_gpi_member.cpp | 330 + .../set/hdr_intrusive_skiplist_rcu_gpt.cpp | 331 + .../hdr_intrusive_skiplist_rcu_gpt_member.cpp | 330 + .../set/hdr_intrusive_skiplist_rcu_shb.cpp | 372 + .../hdr_intrusive_skiplist_rcu_shb_member.cpp | 368 + .../set/hdr_intrusive_skiplist_rcu_sht.cpp | 369 + .../hdr_intrusive_skiplist_rcu_sht_member.cpp | 368 + .../test-hdr/set/hdr_intrusive_skiplist_set.h | 787 ++ .../set/hdr_intrusive_skiplist_set_rcu.h | 778 ++ .../set/hdr_intrusive_splitlist_set_hp.cpp | 318 + .../hdr_intrusive_splitlist_set_hp_lazy.cpp | 319 + .../set/hdr_intrusive_splitlist_set_hrc.cpp | 155 + .../hdr_intrusive_splitlist_set_hrc_lazy.cpp | 155 + .../set/hdr_intrusive_splitlist_set_nogc.cpp | 309 + .../hdr_intrusive_splitlist_set_nogc_lazy.cpp | 310 + .../set/hdr_intrusive_splitlist_set_ptb.cpp | 318 + .../hdr_intrusive_splitlist_set_ptb_lazy.cpp | 318 + .../hdr_intrusive_splitlist_set_rcu_gpb.cpp | 322 + ...r_intrusive_splitlist_set_rcu_gpb_lazy.cpp | 322 + .../hdr_intrusive_splitlist_set_rcu_gpi.cpp | 322 + ...r_intrusive_splitlist_set_rcu_gpi_lazy.cpp | 322 + .../hdr_intrusive_splitlist_set_rcu_gpt.cpp | 322 + ...r_intrusive_splitlist_set_rcu_gpt_lazy.cpp | 322 + .../hdr_intrusive_splitlist_set_rcu_shb.cpp | 348 + ...r_intrusive_splitlist_set_rcu_shb_lazy.cpp | 348 + .../hdr_intrusive_splitlist_set_rcu_sht.cpp | 348 + ...r_intrusive_splitlist_set_rcu_sht_lazy.cpp | 347 + .../hdr_intrusive_striped_hashset_avlset.cpp | 96 + .../hdr_intrusive_striped_hashset_list.cpp | 187 + .../set/hdr_intrusive_striped_hashset_set.cpp | 96 + .../hdr_intrusive_striped_hashset_sgset.cpp | 96 + .../hdr_intrusive_striped_hashset_slist.cpp | 187 + ...hdr_intrusive_striped_hashset_splayset.cpp | 96 + ...hdr_intrusive_striped_hashset_treapset.cpp | 111 + .../hdr_intrusive_striped_hashset_uset.cpp | 178 + .../set/hdr_intrusive_striped_set.cpp | 3 + .../test-hdr/set/hdr_intrusive_striped_set.h | 734 ++ tests/test-hdr/set/hdr_michael_set_hp.cpp | 91 + tests/test-hdr/set/hdr_michael_set_hrc.cpp | 88 + .../test-hdr/set/hdr_michael_set_lazy_hp.cpp | 89 + .../test-hdr/set/hdr_michael_set_lazy_hrc.cpp | 88 + .../set/hdr_michael_set_lazy_nogc.cpp | 88 + .../test-hdr/set/hdr_michael_set_lazy_ptb.cpp | 88 + .../set/hdr_michael_set_lazy_rcu_gpb.cpp | 91 + .../set/hdr_michael_set_lazy_rcu_gpi.cpp | 91 + .../set/hdr_michael_set_lazy_rcu_gpt.cpp | 91 + .../set/hdr_michael_set_lazy_rcu_shb.cpp | 98 + .../set/hdr_michael_set_lazy_rcu_sht.cpp | 98 + tests/test-hdr/set/hdr_michael_set_nogc.cpp | 88 + tests/test-hdr/set/hdr_michael_set_ptb.cpp | 88 + .../test-hdr/set/hdr_michael_set_rcu_gpb.cpp | 91 + .../test-hdr/set/hdr_michael_set_rcu_gpi.cpp | 91 + .../test-hdr/set/hdr_michael_set_rcu_gpt.cpp | 91 + .../test-hdr/set/hdr_michael_set_rcu_shb.cpp | 98 + .../test-hdr/set/hdr_michael_set_rcu_sht.cpp | 98 + .../hdr_refinable_hashset_boost_flat_set.cpp | 169 + .../set/hdr_refinable_hashset_boost_list.cpp | 165 + .../set/hdr_refinable_hashset_boost_set.cpp | 167 + ..._refinable_hashset_boost_stable_vector.cpp | 166 + ..._refinable_hashset_boost_unordered_set.cpp | 154 + .../hdr_refinable_hashset_boost_vector.cpp | 167 + .../set/hdr_refinable_hashset_hashset_std.cpp | 156 + .../set/hdr_refinable_hashset_hashset_vc.cpp | 156 + .../set/hdr_refinable_hashset_list.cpp | 142 + 
.../set/hdr_refinable_hashset_set.cpp | 144 + .../set/hdr_refinable_hashset_slist.cpp | 165 + .../set/hdr_refinable_hashset_vector.cpp | 142 + tests/test-hdr/set/hdr_set.h | 1127 +++ tests/test-hdr/set/hdr_skiplist_set.h | 615 ++ tests/test-hdr/set/hdr_skiplist_set_hp.cpp | 313 + tests/test-hdr/set/hdr_skiplist_set_hrc.cpp | 311 + tests/test-hdr/set/hdr_skiplist_set_nogc.cpp | 311 + tests/test-hdr/set/hdr_skiplist_set_ptb.cpp | 311 + tests/test-hdr/set/hdr_skiplist_set_rcu.h | 502 ++ .../test-hdr/set/hdr_skiplist_set_rcu_gpb.cpp | 317 + .../test-hdr/set/hdr_skiplist_set_rcu_gpi.cpp | 319 + .../test-hdr/set/hdr_skiplist_set_rcu_gpt.cpp | 317 + .../test-hdr/set/hdr_skiplist_set_rcu_shb.cpp | 368 + .../test-hdr/set/hdr_skiplist_set_rcu_sht.cpp | 368 + tests/test-hdr/set/hdr_splitlist_set_hp.cpp | 128 + tests/test-hdr/set/hdr_splitlist_set_hrc.cpp | 128 + .../set/hdr_splitlist_set_lazy_hp.cpp | 128 + .../set/hdr_splitlist_set_lazy_hrc.cpp | 128 + .../set/hdr_splitlist_set_lazy_nogc.cpp | 128 + .../set/hdr_splitlist_set_lazy_ptb.cpp | 128 + .../set/hdr_splitlist_set_lazy_rcu_gpb.cpp | 131 + .../set/hdr_splitlist_set_lazy_rcu_gpi.cpp | 131 + .../set/hdr_splitlist_set_lazy_rcu_gpt.cpp | 131 + .../set/hdr_splitlist_set_lazy_rcu_shb.cpp | 138 + .../set/hdr_splitlist_set_lazy_rcu_sht.cpp | 136 + tests/test-hdr/set/hdr_splitlist_set_nogc.cpp | 128 + tests/test-hdr/set/hdr_splitlist_set_ptb.cpp | 128 + .../set/hdr_splitlist_set_rcu_gpb.cpp | 131 + .../set/hdr_splitlist_set_rcu_gpi.cpp | 131 + .../set/hdr_splitlist_set_rcu_gpt.cpp | 131 + .../set/hdr_splitlist_set_rcu_shb.cpp | 136 + .../set/hdr_splitlist_set_rcu_sht.cpp | 136 + .../hdr_striped_hashset_boost_flat_set.cpp | 171 + .../set/hdr_striped_hashset_boost_list.cpp | 153 + .../set/hdr_striped_hashset_boost_set.cpp | 159 + ...dr_striped_hashset_boost_stable_vector.cpp | 156 + ...dr_striped_hashset_boost_unordered_set.cpp | 145 + .../set/hdr_striped_hashset_boost_vector.cpp | 156 + .../set/hdr_striped_hashset_hashset_std.cpp | 149 + .../set/hdr_striped_hashset_hashset_vc.cpp | 149 + .../test-hdr/set/hdr_striped_hashset_list.cpp | 134 + .../test-hdr/set/hdr_striped_hashset_set.cpp | 137 + .../set/hdr_striped_hashset_slist.cpp | 158 + .../set/hdr_striped_hashset_vector.cpp | 135 + tests/test-hdr/set/hdr_striped_set.h | 770 ++ .../set/intrusive_cuckoo_set_common.h | 51 + tests/test-hdr/size_check.h | 36 + .../stack/hdr_elimination_stack_hp.cpp | 70 + .../stack/hdr_elimination_stack_hrc.cpp | 69 + .../stack/hdr_elimination_stack_ptb.cpp | 69 + tests/test-hdr/stack/hdr_fcstack.cpp | 144 + .../hdr_intrusive_elimination_stack_hp.cpp | 154 + .../hdr_intrusive_elimination_stack_hrc.cpp | 69 + .../hdr_intrusive_elimination_stack_ptb.cpp | 136 + .../test-hdr/stack/hdr_intrusive_fcstack.cpp | 294 + .../stack/hdr_intrusive_treiber_stack.h | 220 + .../stack/hdr_intrusive_treiber_stack_hp.cpp | 119 + .../stack/hdr_intrusive_treiber_stack_hrc.cpp | 54 + .../stack/hdr_intrusive_treiber_stack_ptb.cpp | 104 + tests/test-hdr/stack/hdr_treiber_stack.h | 162 + tests/test-hdr/stack/hdr_treiber_stack_hp.cpp | 42 + .../test-hdr/stack/hdr_treiber_stack_hrc.cpp | 41 + .../test-hdr/stack/hdr_treiber_stack_ptb.cpp | 42 + tests/test-hdr/tree/hdr_ellenbintree_map.h | 706 ++ .../test-hdr/tree/hdr_ellenbintree_map_hp.cpp | 140 + .../tree/hdr_ellenbintree_map_ptb.cpp | 140 + .../tree/hdr_ellenbintree_map_rcu_gpb.cpp | 141 + .../tree/hdr_ellenbintree_map_rcu_gpi.cpp | 141 + .../tree/hdr_ellenbintree_map_rcu_gpt.cpp | 141 + .../tree/hdr_ellenbintree_map_rcu_shb.cpp | 160 + 
.../tree/hdr_ellenbintree_map_rcu_sht.cpp | 160 + tests/test-hdr/tree/hdr_ellenbintree_set.h | 802 ++ .../test-hdr/tree/hdr_ellenbintree_set_hp.cpp | 149 + .../tree/hdr_ellenbintree_set_ptb.cpp | 149 + .../tree/hdr_ellenbintree_set_rcu_gpb.cpp | 150 + .../tree/hdr_ellenbintree_set_rcu_gpi.cpp | 150 + .../tree/hdr_ellenbintree_set_rcu_gpt.cpp | 150 + .../tree/hdr_ellenbintree_set_rcu_shb.cpp | 169 + .../tree/hdr_ellenbintree_set_rcu_sht.cpp | 169 + tests/test-hdr/tree/hdr_intrusive_bintree.h | 1208 +++ .../tree/hdr_intrusive_ellen_bintree_hp.cpp | 167 + .../hdr_intrusive_ellen_bintree_hp_member.cpp | 169 + .../hdr_intrusive_ellen_bintree_pool_hp.h | 60 + .../hdr_intrusive_ellen_bintree_pool_ptb.h | 60 + .../hdr_intrusive_ellen_bintree_pool_rcu.h | 70 + .../tree/hdr_intrusive_ellen_bintree_ptb.cpp | 167 + ...hdr_intrusive_ellen_bintree_ptb_member.cpp | 169 + .../hdr_intrusive_ellen_bintree_rcu_gpb.cpp | 170 + ...intrusive_ellen_bintree_rcu_gpb_member.cpp | 172 + .../hdr_intrusive_ellen_bintree_rcu_gpi.cpp | 170 + ...intrusive_ellen_bintree_rcu_gpi_member.cpp | 172 + .../hdr_intrusive_ellen_bintree_rcu_gpt.cpp | 170 + ...intrusive_ellen_bintree_rcu_gpt_member.cpp | 172 + .../hdr_intrusive_ellen_bintree_rcu_shb.cpp | 190 + ...intrusive_ellen_bintree_rcu_shb_member.cpp | 191 + .../hdr_intrusive_ellen_bintree_rcu_sht.cpp | 189 + ...intrusive_ellen_bintree_rcu_sht_member.cpp | 192 + tests/test-hdr/tree/hdr_tree_reg.cpp | 29 + tests/unit/_template.cpp | 33 + tests/unit/alloc/hoard_threadtest.cpp | 180 + tests/unit/alloc/larson.cpp | 202 + tests/unit/alloc/linux_scale.cpp | 157 + tests/unit/alloc/michael_allocator.cpp | 8 + tests/unit/alloc/michael_allocator.h | 197 + tests/unit/alloc/random.cpp | 202 + tests/unit/alloc/random_gen.h | 44 + tests/unit/ellen_bintree_update_desc_pool.cpp | 12 + tests/unit/ellen_bintree_update_desc_pool.h | 101 + tests/unit/lock/nolock.h | 16 + tests/unit/lock/spinlock.cpp | 137 + tests/unit/lock/win32_lock.h | 42 + tests/unit/map2/map_defs.h | 550 ++ tests/unit/map2/map_delodd.cpp | 748 ++ tests/unit/map2/map_find_int.cpp | 279 + tests/unit/map2/map_find_string.cpp | 275 + tests/unit/map2/map_insdel_func.cpp | 550 ++ tests/unit/map2/map_insdel_int.cpp | 280 + tests/unit/map2/map_insdel_item_int.cpp | 281 + tests/unit/map2/map_insdel_item_string.cpp | 282 + tests/unit/map2/map_insdel_string.cpp | 281 + tests/unit/map2/map_insdelfind.cpp | 270 + tests/unit/map2/map_insfind_int.cpp | 217 + tests/unit/map2/map_types.h | 5199 +++++++++++++ tests/unit/map2/std_hash_map.h | 14 + tests/unit/map2/std_hash_map_gcc.h | 99 + tests/unit/map2/std_hash_map_vc.h | 87 + tests/unit/map2/std_map.h | 14 + tests/unit/map2/std_map_gcc.h | 89 + tests/unit/map2/std_map_vc.h | 87 + tests/unit/michael_alloc.cpp | 7 + tests/unit/michael_alloc.h | 67 + tests/unit/nonconcurrent_iterator_sequence.h | 138 + tests/unit/pqueue/ellen_bintree_pqueue.h | 117 + tests/unit/pqueue/pop.cpp | 240 + tests/unit/pqueue/pqueue_defs.h | 178 + tests/unit/pqueue/pqueue_item.h | 75 + tests/unit/pqueue/pqueue_type.h | 493 ++ tests/unit/pqueue/push.cpp | 199 + tests/unit/pqueue/push_pop.cpp | 239 + tests/unit/pqueue/skiplist_pqueue.h | 117 + tests/unit/pqueue/std_pqueue.h | 110 + tests/unit/print_cuckoo_stat.h | 89 + tests/unit/print_deque_stat.h | 41 + tests/unit/print_ellenbintree_stat.h | 50 + tests/unit/print_mspriorityqueue_stat.h | 26 + tests/unit/print_segmentedqueue_stat.h | 31 + tests/unit/print_skip_list_stat.h | 62 + tests/unit/queue/intrusive_queue_defs.h | 213 + .../queue/intrusive_queue_reader_writer.cpp 
| 462 ++ tests/unit/queue/intrusive_queue_type.h | 691 ++ tests/unit/queue/queue_defs.h | 308 + tests/unit/queue/queue_pop.cpp | 229 + tests/unit/queue/queue_push.cpp | 240 + tests/unit/queue/queue_random.cpp | 301 + tests/unit/queue/queue_reader_writer.cpp | 368 + tests/unit/queue/queue_type.h | 895 +++ tests/unit/queue/std_queue.h | 53 + tests/unit/set2/set_defs.h | 521 ++ tests/unit/set2/set_delodd.cpp | 783 ++ tests/unit/set2/set_insdel_func.cpp | 22 + tests/unit/set2/set_insdel_func.h | 583 ++ tests/unit/set2/set_insdel_func2.cpp | 10 + tests/unit/set2/set_insdel_func3.cpp | 10 + tests/unit/set2/set_insdel_func4.cpp | 11 + tests/unit/set2/set_insdel_func5.cpp | 10 + tests/unit/set2/set_insdel_func6.cpp | 10 + tests/unit/set2/set_insdel_func7.cpp | 10 + tests/unit/set2/set_insdel_string.cpp | 558 ++ tests/unit/set2/set_insdelfind.cpp | 276 + tests/unit/set2/set_types.h | 4800 ++++++++++++ tests/unit/set2/std_hash_set.h | 12 + tests/unit/set2/std_hash_set_std.h | 102 + tests/unit/set2/std_hash_set_vc9.h | 106 + tests/unit/set2/std_set.h | 90 + tests/unit/stack/intrusive_stack_defs.h | 215 + tests/unit/stack/intrusive_stack_type.h | 684 ++ tests/unit/stack/stack_defs.h | 242 + tests/unit/stack/stack_intrusive_pushpop.cpp | 378 + tests/unit/stack/stack_push.cpp | 213 + tests/unit/stack/stack_pushpop.cpp | 280 + tests/unit/stack/stack_type.h | 704 +
 1072 files changed, 297238 insertions(+)
 create mode 100644 brush_cds.pl create mode 100644 build/Makefile create mode 100644 build/build.sh create mode 100644 build/sample/build-freebsd-amd64.sh create mode 100644 build/sample/build-hpux1123.sh create mode 100644 build/sample/build-hpux1131.sh create mode 100644 build/sample/build-linux-amd64.sh create mode 100644 build/sample/build-linux-ia64.sh create mode 100644 build/sample/build-linux-sparc.sh create mode 100644 build/sample/build-linux-x86.sh create mode 100644 build/sample/build-mingw-amd64.bat create mode 100644 build/sample/build-osx-clang-libc++.sh create mode 100644 build/sample/build-osx-gcc.sh create mode 100644 build/sample/build-sun-sparc.sh create mode 100644 cds/algo/base.h create mode 100644 cds/algo/elimination.h create mode 100644 cds/algo/elimination_opt.h create mode 100644 cds/algo/elimination_tls.h create mode 100644 cds/algo/flat_combining.h create mode 100644 cds/backoff_strategy.h create mode 100644 cds/bitop.h create mode 100644 cds/compiler/backoff.h create mode 100644 cds/compiler/bitop.h create mode 100644 cds/compiler/clang/cxx11_atomic_prepatches.h create mode 100644 cds/compiler/clang/defs.h create mode 100644 cds/compiler/cstdint_boost.h create mode 100644 cds/compiler/cstdint_std.h create mode 100644 cds/compiler/cxx11_atomic.h create mode 100644 cds/compiler/cxx11_atomic_patches.h create mode 100644 cds/compiler/cxx11_atomic_prepatches.h create mode 100644 cds/compiler/defs.h create mode 100644 cds/compiler/gcc/amd64/backoff.h create mode 100644 cds/compiler/gcc/amd64/bitop.h create mode 100644 cds/compiler/gcc/amd64/cxx11_atomic.h create mode 100644 cds/compiler/gcc/compiler_barriers.h create mode 100644 cds/compiler/gcc/compiler_macro.h create mode 100644 cds/compiler/gcc/cxx11_atomic_patches.h create mode 100644 cds/compiler/gcc/defs.h create mode 100644 cds/compiler/gcc/ia64/backoff.h create mode 100644 cds/compiler/gcc/ia64/bitop.h create mode 100644 cds/compiler/gcc/ia64/cxx11_atomic.h create mode 100644 cds/compiler/gcc/ppc64/backoff.h create mode 100644 cds/compiler/gcc/ppc64/bitop.h create mode 100644 cds/compiler/gcc/sparc/backoff.h create mode 
100644 cds/compiler/gcc/sparc/bitop.h create mode 100644 cds/compiler/gcc/sparc/cxx11_atomic.h create mode 100644 cds/compiler/gcc/x86/backoff.h create mode 100644 cds/compiler/gcc/x86/bitop.h create mode 100644 cds/compiler/gcc/x86/cxx11_atomic.h create mode 100644 cds/compiler/gcc/x86/cxx11_atomic32.h create mode 100644 cds/compiler/icl/compiler_barriers.h create mode 100644 cds/compiler/icl/cxx11_atomic_patches_win.h create mode 100644 cds/compiler/icl/defs.h create mode 100644 cds/compiler/vc/amd64/backoff.h create mode 100644 cds/compiler/vc/amd64/bitop.h create mode 100644 cds/compiler/vc/amd64/cxx11_atomic.h create mode 100644 cds/compiler/vc/compiler_barriers.h create mode 100644 cds/compiler/vc/defs.h create mode 100644 cds/compiler/vc/x86/backoff.h create mode 100644 cds/compiler/vc/x86/bitop.h create mode 100644 cds/compiler/vc/x86/cxx11_atomic.h create mode 100644 cds/container/base.h create mode 100644 cds/container/basket_queue.h create mode 100644 cds/container/cuckoo_base.h create mode 100644 cds/container/cuckoo_map.h create mode 100644 cds/container/cuckoo_set.h create mode 100644 cds/container/details/guarded_ptr_cast.h create mode 100644 cds/container/details/make_lazy_kvlist.h create mode 100644 cds/container/details/make_lazy_list.h create mode 100644 cds/container/details/make_michael_kvlist.h create mode 100644 cds/container/details/make_michael_list.h create mode 100644 cds/container/details/make_skip_list_map.h create mode 100644 cds/container/details/make_skip_list_set.h create mode 100644 cds/container/details/make_split_list_set.h create mode 100644 cds/container/ellen_bintree_base.h create mode 100644 cds/container/ellen_bintree_map_hp.h create mode 100644 cds/container/ellen_bintree_map_impl.h create mode 100644 cds/container/ellen_bintree_map_ptb.h create mode 100644 cds/container/ellen_bintree_map_rcu.h create mode 100644 cds/container/ellen_bintree_set_hp.h create mode 100644 cds/container/ellen_bintree_set_impl.h create mode 100644 cds/container/ellen_bintree_set_ptb.h create mode 100644 cds/container/ellen_bintree_set_rcu.h create mode 100644 cds/container/fcdeque.h create mode 100644 cds/container/fcpriority_queue.h create mode 100644 cds/container/fcqueue.h create mode 100644 cds/container/fcstack.h create mode 100644 cds/container/lazy_kvlist_hp.h create mode 100644 cds/container/lazy_kvlist_hrc.h create mode 100644 cds/container/lazy_kvlist_impl.h create mode 100644 cds/container/lazy_kvlist_nogc.h create mode 100644 cds/container/lazy_kvlist_ptb.h create mode 100644 cds/container/lazy_kvlist_rcu.h create mode 100644 cds/container/lazy_list_base.h create mode 100644 cds/container/lazy_list_hp.h create mode 100644 cds/container/lazy_list_hrc.h create mode 100644 cds/container/lazy_list_impl.h create mode 100644 cds/container/lazy_list_nogc.h create mode 100644 cds/container/lazy_list_ptb.h create mode 100644 cds/container/lazy_list_rcu.h create mode 100644 cds/container/michael_deque.h create mode 100644 cds/container/michael_kvlist_hp.h create mode 100644 cds/container/michael_kvlist_hrc.h create mode 100644 cds/container/michael_kvlist_impl.h create mode 100644 cds/container/michael_kvlist_nogc.h create mode 100644 cds/container/michael_kvlist_ptb.h create mode 100644 cds/container/michael_kvlist_rcu.h create mode 100644 cds/container/michael_list_base.h create mode 100644 cds/container/michael_list_hp.h create mode 100644 cds/container/michael_list_hrc.h create mode 100644 cds/container/michael_list_impl.h create mode 100644 
cds/container/michael_list_nogc.h create mode 100644 cds/container/michael_list_ptb.h create mode 100644 cds/container/michael_list_rcu.h create mode 100644 cds/container/michael_map.h create mode 100644 cds/container/michael_map_base.h create mode 100644 cds/container/michael_map_nogc.h create mode 100644 cds/container/michael_map_rcu.h create mode 100644 cds/container/michael_set.h create mode 100644 cds/container/michael_set_base.h create mode 100644 cds/container/michael_set_nogc.h create mode 100644 cds/container/michael_set_rcu.h create mode 100644 cds/container/moir_queue.h create mode 100644 cds/container/mspriority_queue.h create mode 100644 cds/container/msqueue.h create mode 100644 cds/container/optimistic_queue.h create mode 100644 cds/container/rwqueue.h create mode 100644 cds/container/segmented_queue.h create mode 100644 cds/container/skip_list_base.h create mode 100644 cds/container/skip_list_map_hp.h create mode 100644 cds/container/skip_list_map_hrc.h create mode 100644 cds/container/skip_list_map_impl.h create mode 100644 cds/container/skip_list_map_nogc.h create mode 100644 cds/container/skip_list_map_ptb.h create mode 100644 cds/container/skip_list_map_rcu.h create mode 100644 cds/container/skip_list_set_hp.h create mode 100644 cds/container/skip_list_set_hrc.h create mode 100644 cds/container/skip_list_set_impl.h create mode 100644 cds/container/skip_list_set_nogc.h create mode 100644 cds/container/skip_list_set_ptb.h create mode 100644 cds/container/skip_list_set_rcu.h create mode 100644 cds/container/split_list_base.h create mode 100644 cds/container/split_list_map.h create mode 100644 cds/container/split_list_map_nogc.h create mode 100644 cds/container/split_list_map_rcu.h create mode 100644 cds/container/split_list_set.h create mode 100644 cds/container/split_list_set_nogc.h create mode 100644 cds/container/split_list_set_rcu.h create mode 100644 cds/container/striped_map.h create mode 100644 cds/container/striped_map/boost_flat_map.h create mode 100644 cds/container/striped_map/boost_list.h create mode 100644 cds/container/striped_map/boost_map.h create mode 100644 cds/container/striped_map/boost_slist.h create mode 100644 cds/container/striped_map/boost_unordered_map.h create mode 100644 cds/container/striped_map/std_hash_map.h create mode 100644 cds/container/striped_map/std_hash_map_std.h create mode 100644 cds/container/striped_map/std_hash_map_vc.h create mode 100644 cds/container/striped_map/std_list.h create mode 100644 cds/container/striped_map/std_map.h create mode 100644 cds/container/striped_set.h create mode 100644 cds/container/striped_set/adapter.h create mode 100644 cds/container/striped_set/boost_flat_set.h create mode 100644 cds/container/striped_set/boost_list.h create mode 100644 cds/container/striped_set/boost_set.h create mode 100644 cds/container/striped_set/boost_slist.h create mode 100644 cds/container/striped_set/boost_stable_vector.h create mode 100644 cds/container/striped_set/boost_unordered_set.h create mode 100644 cds/container/striped_set/boost_vector.h create mode 100644 cds/container/striped_set/std_hash_set.h create mode 100644 cds/container/striped_set/std_hash_set_std.h create mode 100644 cds/container/striped_set/std_hash_set_vc.h create mode 100644 cds/container/striped_set/std_list.h create mode 100644 cds/container/striped_set/std_set.h create mode 100644 cds/container/striped_set/std_vector.h create mode 100644 cds/container/treiber_stack.h create mode 100644 cds/container/tsigas_cycle_queue.h create mode 100644 
cds/container/vyukov_mpmc_cycle_queue.h create mode 100644 cds/cxx11_atomic.h create mode 100644 cds/details/aligned_allocator.h create mode 100644 cds/details/aligned_type.h create mode 100644 cds/details/allocator.h create mode 100644 cds/details/binary_functor_wrapper.h create mode 100644 cds/details/bit_reverse_counter.h create mode 100644 cds/details/bitop_generic.h create mode 100644 cds/details/bounded_array.h create mode 100644 cds/details/bounded_container.h create mode 100644 cds/details/comparator.h create mode 100644 cds/details/cxx11_features.h create mode 100644 cds/details/defs.h create mode 100644 cds/details/functor_wrapper.h create mode 100644 cds/details/hash_functor_selector.h create mode 100644 cds/details/is_aligned.h create mode 100644 cds/details/lib.h create mode 100644 cds/details/make_const_type.h create mode 100644 cds/details/marked_ptr.h create mode 100644 cds/details/noncopyable.h create mode 100644 cds/details/static_functor.h create mode 100644 cds/details/std/chrono.h create mode 100644 cds/details/std/condition_variable.h create mode 100644 cds/details/std/memory.h create mode 100644 cds/details/std/mutex.h create mode 100644 cds/details/std/thread.h create mode 100644 cds/details/std/tuple.h create mode 100644 cds/details/std/type_traits.h create mode 100644 cds/details/trivial_assign.h create mode 100644 cds/details/type_padding.h create mode 100644 cds/details/void_selector.h create mode 100644 cds/gc/all.h create mode 100644 cds/gc/default_gc.h create mode 100644 cds/gc/details/retired_ptr.h create mode 100644 cds/gc/exception.h create mode 100644 cds/gc/gc_fwd.h create mode 100644 cds/gc/guarded_ptr.h create mode 100644 cds/gc/hp.h create mode 100644 cds/gc/hp_decl.h create mode 100644 cds/gc/hp_impl.h create mode 100644 cds/gc/hrc.h create mode 100644 cds/gc/hrc/details/hrc_fwd.h create mode 100644 cds/gc/hrc/details/hrc_inline.h create mode 100644 cds/gc/hrc/details/hrc_retired.h create mode 100644 cds/gc/hrc/gc_fwd.h create mode 100644 cds/gc/hrc/hrc.h create mode 100644 cds/gc/hrc_decl.h create mode 100644 cds/gc/hrc_impl.h create mode 100644 cds/gc/hzp/details/hp_alloc.h create mode 100644 cds/gc/hzp/details/hp_fwd.h create mode 100644 cds/gc/hzp/details/hp_inline.h create mode 100644 cds/gc/hzp/details/hp_retired.h create mode 100644 cds/gc/hzp/details/hp_type.h create mode 100644 cds/gc/hzp/hzp.h create mode 100644 cds/gc/nogc.h create mode 100644 cds/gc/ptb.h create mode 100644 cds/gc/ptb/ptb.h create mode 100644 cds/gc/ptb_decl.h create mode 100644 cds/gc/ptb_impl.h create mode 100644 cds/init.h create mode 100644 cds/int_algo.h create mode 100644 cds/intrusive/base.h create mode 100644 cds/intrusive/basket_queue.h create mode 100644 cds/intrusive/cuckoo_set.h create mode 100644 cds/intrusive/deque_stat.h create mode 100644 cds/intrusive/details/dummy_node_holder.h create mode 100644 cds/intrusive/details/ellen_bintree_base.h create mode 100644 cds/intrusive/ellen_bintree_hp.h create mode 100644 cds/intrusive/ellen_bintree_impl.h create mode 100644 cds/intrusive/ellen_bintree_ptb.h create mode 100644 cds/intrusive/ellen_bintree_rcu.h create mode 100644 cds/intrusive/fcqueue.h create mode 100644 cds/intrusive/fcstack.h create mode 100644 cds/intrusive/lazy_list_base.h create mode 100644 cds/intrusive/lazy_list_hp.h create mode 100644 cds/intrusive/lazy_list_hrc.h create mode 100644 cds/intrusive/lazy_list_impl.h create mode 100644 cds/intrusive/lazy_list_nogc.h create mode 100644 cds/intrusive/lazy_list_ptb.h create mode 100644 
cds/intrusive/lazy_list_rcu.h create mode 100644 cds/intrusive/michael_deque.h create mode 100644 cds/intrusive/michael_list_base.h create mode 100644 cds/intrusive/michael_list_hp.h create mode 100644 cds/intrusive/michael_list_hrc.h create mode 100644 cds/intrusive/michael_list_impl.h create mode 100644 cds/intrusive/michael_list_nogc.h create mode 100644 cds/intrusive/michael_list_ptb.h create mode 100644 cds/intrusive/michael_list_rcu.h create mode 100644 cds/intrusive/michael_set.h create mode 100644 cds/intrusive/michael_set_base.h create mode 100644 cds/intrusive/michael_set_nogc.h create mode 100644 cds/intrusive/michael_set_rcu.h create mode 100644 cds/intrusive/moir_queue.h create mode 100644 cds/intrusive/mspriority_queue.h create mode 100644 cds/intrusive/msqueue.h create mode 100644 cds/intrusive/node_traits.h create mode 100644 cds/intrusive/optimistic_queue.h create mode 100644 cds/intrusive/options.h create mode 100644 cds/intrusive/queue_stat.h create mode 100644 cds/intrusive/segmented_queue.h create mode 100644 cds/intrusive/single_link_struct.h create mode 100644 cds/intrusive/skip_list_base.h create mode 100644 cds/intrusive/skip_list_hp.h create mode 100644 cds/intrusive/skip_list_hrc.h create mode 100644 cds/intrusive/skip_list_impl.h create mode 100644 cds/intrusive/skip_list_nogc.h create mode 100644 cds/intrusive/skip_list_ptb.h create mode 100644 cds/intrusive/skip_list_rcu.h create mode 100644 cds/intrusive/split_list.h create mode 100644 cds/intrusive/split_list_base.h create mode 100644 cds/intrusive/split_list_nogc.h create mode 100644 cds/intrusive/split_list_rcu.h create mode 100644 cds/intrusive/striped_set.h create mode 100644 cds/intrusive/striped_set/adapter.h create mode 100644 cds/intrusive/striped_set/boost_avl_set.h create mode 100644 cds/intrusive/striped_set/boost_list.h create mode 100644 cds/intrusive/striped_set/boost_set.h create mode 100644 cds/intrusive/striped_set/boost_sg_set.h create mode 100644 cds/intrusive/striped_set/boost_slist.h create mode 100644 cds/intrusive/striped_set/boost_splay_set.h create mode 100644 cds/intrusive/striped_set/boost_treap_set.h create mode 100644 cds/intrusive/striped_set/boost_unordered_set.h create mode 100644 cds/intrusive/striped_set/resizing_policy.h create mode 100644 cds/intrusive/striped_set/striping_policy.h create mode 100644 cds/intrusive/treiber_stack.h create mode 100644 cds/intrusive/tsigas_cycle_queue.h create mode 100644 cds/intrusive/vyukov_mpmc_cycle_queue.h create mode 100644 cds/lock/array.h create mode 100644 cds/lock/scoped_lock.h create mode 100644 cds/lock/spinlock.h create mode 100644 cds/memory/michael/allocator.h create mode 100644 cds/memory/michael/bound_check.h create mode 100644 cds/memory/michael/options.h create mode 100644 cds/memory/michael/osalloc_stat.h create mode 100644 cds/memory/michael/procheap_stat.h create mode 100644 cds/memory/pool_allocator.h create mode 100644 cds/memory/vyukov_queue_pool.h create mode 100644 cds/numtraits.h create mode 100644 cds/opt/buffer.h create mode 100644 cds/opt/compare.h create mode 100644 cds/opt/hash.h create mode 100644 cds/opt/make_options_std.h create mode 100644 cds/opt/make_options_var.h create mode 100644 cds/opt/options.h create mode 100644 cds/opt/permutation.h create mode 100644 cds/opt/value_cleaner.h create mode 100644 cds/os/aix/alloc_aligned.h create mode 100644 cds/os/aix/timer.h create mode 100644 cds/os/aix/topology.h create mode 100644 cds/os/alloc_aligned.h create mode 100644 cds/os/details/fake_topology.h create 
mode 100644 cds/os/free_bsd/alloc_aligned.h create mode 100644 cds/os/free_bsd/timer.h create mode 100644 cds/os/free_bsd/topology.h create mode 100644 cds/os/hpux/alloc_aligned.h create mode 100644 cds/os/hpux/timer.h create mode 100644 cds/os/hpux/topology.h create mode 100644 cds/os/libc/alloc_aligned.h create mode 100644 cds/os/linux/alloc_aligned.h create mode 100644 cds/os/linux/timer.h create mode 100644 cds/os/linux/topology.h create mode 100644 cds/os/osx/timer.h create mode 100644 cds/os/osx/topology.h create mode 100644 cds/os/posix/alloc_aligned.h create mode 100644 cds/os/posix/fake_topology.h create mode 100644 cds/os/posix/syserror.h create mode 100644 cds/os/posix/thread.h create mode 100644 cds/os/posix/timer.h create mode 100644 cds/os/sunos/alloc_aligned.h create mode 100644 cds/os/sunos/timer.h create mode 100644 cds/os/sunos/topology.h create mode 100644 cds/os/syserror.h create mode 100644 cds/os/thread.h create mode 100644 cds/os/timer.h create mode 100644 cds/os/topology.h create mode 100644 cds/os/win/alloc_aligned.h create mode 100644 cds/os/win/syserror.h create mode 100644 cds/os/win/thread.h create mode 100644 cds/os/win/timer.h create mode 100644 cds/os/win/topology.h create mode 100644 cds/ref.h create mode 100644 cds/refcounter.h create mode 100644 cds/threading/details/_common.h create mode 100644 cds/threading/details/auto_detect.h create mode 100644 cds/threading/details/cxx11.h create mode 100644 cds/threading/details/cxx11_manager.h create mode 100644 cds/threading/details/gcc.h create mode 100644 cds/threading/details/gcc_manager.h create mode 100644 cds/threading/details/msvc.h create mode 100644 cds/threading/details/msvc_manager.h create mode 100644 cds/threading/details/pthread.h create mode 100644 cds/threading/details/pthread_manager.h create mode 100644 cds/threading/details/wintls.h create mode 100644 cds/threading/details/wintls_manager.h create mode 100644 cds/threading/model.h create mode 100644 cds/urcu/details/base.h create mode 100644 cds/urcu/details/check_deadlock.h create mode 100644 cds/urcu/details/gp.h create mode 100644 cds/urcu/details/gp_decl.h create mode 100644 cds/urcu/details/gpb.h create mode 100644 cds/urcu/details/gpi.h create mode 100644 cds/urcu/details/gpt.h create mode 100644 cds/urcu/details/sh.h create mode 100644 cds/urcu/details/sh_decl.h create mode 100644 cds/urcu/details/sig_buffered.h create mode 100644 cds/urcu/details/sig_threaded.h create mode 100644 cds/urcu/dispose_thread.h create mode 100644 cds/urcu/exempt_ptr.h create mode 100644 cds/urcu/general_buffered.h create mode 100644 cds/urcu/general_instant.h create mode 100644 cds/urcu/general_threaded.h create mode 100644 cds/urcu/options.h create mode 100644 cds/urcu/signal_buffered.h create mode 100644 cds/urcu/signal_threaded.h create mode 100644 cds/user_setup/allocator.h create mode 100644 cds/user_setup/cache_line.h create mode 100644 cds/user_setup/threading.h create mode 100644 cds/version.h create mode 100644 change.log create mode 100644 doxygen/cds.doxy create mode 100644 doxygen/doxygen.log create mode 100644 doxygen/footer.html create mode 100644 doxygen/index.html create mode 100644 license.txt create mode 100644 make_distrib.pl create mode 100644 make_docs.bat create mode 100644 projects/Win/build-msbuild.cmd create mode 100644 projects/Win/build-vc10.cmd create mode 100644 projects/Win/build-vc11.cmd create mode 100644 projects/Win/build-vc12.cmd create mode 100644 projects/Win/build-vc9.cmd create mode 100644 projects/Win/vc10/cds.sln create 
mode 100644 projects/Win/vc10/cds.vcxproj create mode 100644 projects/Win/vc10/cds.vcxproj.filters create mode 100644 projects/Win/vc10/hdr-test-deque.vcxproj create mode 100644 projects/Win/vc10/hdr-test-map.vcxproj create mode 100644 projects/Win/vc10/hdr-test-map.vcxproj.filters create mode 100644 projects/Win/vc10/hdr-test-misc.vcxproj create mode 100644 projects/Win/vc10/hdr-test-ordered-list.vcxproj create mode 100644 projects/Win/vc10/hdr-test-ordered-list.vcxproj.filters create mode 100644 projects/Win/vc10/hdr-test-priority_queue.vcxproj create mode 100644 projects/Win/vc10/hdr-test-priority_queue.vcxproj.filters create mode 100644 projects/Win/vc10/hdr-test-queue.vcxproj create mode 100644 projects/Win/vc10/hdr-test-queue.vcxproj.filters create mode 100644 projects/Win/vc10/hdr-test-set.vcxproj create mode 100644 projects/Win/vc10/hdr-test-set.vcxproj.filters create mode 100644 projects/Win/vc10/hdr-test-stack.vcxproj create mode 100644 projects/Win/vc10/hdr-test-striped-set.vcxproj create mode 100644 projects/Win/vc10/hdr-test-striped-set.vcxproj.filters create mode 100644 projects/Win/vc10/hdr-test-tree.vcxproj create mode 100644 projects/Win/vc10/hdr-test-tree.vcxproj.filters create mode 100644 projects/Win/vc10/unit-map-delodd.vcxproj create mode 100644 projects/Win/vc10/unit-map-find.vcxproj create mode 100644 projects/Win/vc10/unit-map-insdel.vcxproj create mode 100644 projects/Win/vc10/unit-misc.vcxproj create mode 100644 projects/Win/vc10/unit-misc.vcxproj.filters create mode 100644 projects/Win/vc10/unit-pqueue.vcxproj create mode 100644 projects/Win/vc10/unit-prerequisites.vcxproj create mode 100644 projects/Win/vc10/unit-queue.vcxproj create mode 100644 projects/Win/vc10/unit-set-delodd.vcxproj create mode 100644 projects/Win/vc10/unit-set-insdel.vcxproj create mode 100644 projects/Win/vc10/unit-stack.vcxproj create mode 100644 projects/Win/vc11/cds.sln create mode 100644 projects/Win/vc11/cds.vcxproj create mode 100644 projects/Win/vc11/cds.vcxproj.filters create mode 100644 projects/Win/vc11/hdr-test-deque.vcxproj create mode 100644 projects/Win/vc11/hdr-test-map.vcxproj create mode 100644 projects/Win/vc11/hdr-test-map.vcxproj.filters create mode 100644 projects/Win/vc11/hdr-test-misc.vcxproj create mode 100644 projects/Win/vc11/hdr-test-ordered-list.vcxproj create mode 100644 projects/Win/vc11/hdr-test-ordered-list.vcxproj.filters create mode 100644 projects/Win/vc11/hdr-test-priority_queue.vcxproj create mode 100644 projects/Win/vc11/hdr-test-priority_queue.vcxproj.filters create mode 100644 projects/Win/vc11/hdr-test-queue.vcxproj create mode 100644 projects/Win/vc11/hdr-test-queue.vcxproj.filters create mode 100644 projects/Win/vc11/hdr-test-set.vcxproj create mode 100644 projects/Win/vc11/hdr-test-set.vcxproj.filters create mode 100644 projects/Win/vc11/hdr-test-stack.vcxproj create mode 100644 projects/Win/vc11/hdr-test-striped-set.vcxproj create mode 100644 projects/Win/vc11/hdr-test-striped-set.vcxproj.filters create mode 100644 projects/Win/vc11/hdr-test-tree.vcxproj create mode 100644 projects/Win/vc11/hdr-test-tree.vcxproj.filters create mode 100644 projects/Win/vc11/unit-map-delodd.vcxproj create mode 100644 projects/Win/vc11/unit-map-find.vcxproj create mode 100644 projects/Win/vc11/unit-map-insdel.vcxproj create mode 100644 projects/Win/vc11/unit-misc.vcxproj create mode 100644 projects/Win/vc11/unit-misc.vcxproj.filters create mode 100644 projects/Win/vc11/unit-pqueue.vcxproj create mode 100644 projects/Win/vc11/unit-prerequisites.vcxproj create mode 
100644 projects/Win/vc11/unit-queue.vcxproj create mode 100644 projects/Win/vc11/unit-set-delodd.vcxproj create mode 100644 projects/Win/vc11/unit-set-insdel.vcxproj create mode 100644 projects/Win/vc11/unit-stack.vcxproj create mode 100644 projects/Win/vc12/cds.sln create mode 100644 projects/Win/vc12/cds.vcxproj create mode 100644 projects/Win/vc12/cds.vcxproj.filters create mode 100644 projects/Win/vc12/hdr-test-deque.vcxproj create mode 100644 projects/Win/vc12/hdr-test-map.vcxproj create mode 100644 projects/Win/vc12/hdr-test-map.vcxproj.filters create mode 100644 projects/Win/vc12/hdr-test-misc.vcxproj create mode 100644 projects/Win/vc12/hdr-test-ordered-list.vcxproj create mode 100644 projects/Win/vc12/hdr-test-ordered-list.vcxproj.filters create mode 100644 projects/Win/vc12/hdr-test-priority_queue.vcxproj create mode 100644 projects/Win/vc12/hdr-test-priority_queue.vcxproj.filters create mode 100644 projects/Win/vc12/hdr-test-queue.vcxproj create mode 100644 projects/Win/vc12/hdr-test-queue.vcxproj.filters create mode 100644 projects/Win/vc12/hdr-test-set.vcxproj create mode 100644 projects/Win/vc12/hdr-test-set.vcxproj.filters create mode 100644 projects/Win/vc12/hdr-test-stack.vcxproj create mode 100644 projects/Win/vc12/hdr-test-striped-set.vcxproj create mode 100644 projects/Win/vc12/hdr-test-striped-set.vcxproj.filters create mode 100644 projects/Win/vc12/hdr-test-tree.vcxproj create mode 100644 projects/Win/vc12/hdr-test-tree.vcxproj.filters create mode 100644 projects/Win/vc12/unit-map-delodd.vcxproj create mode 100644 projects/Win/vc12/unit-map-find.vcxproj create mode 100644 projects/Win/vc12/unit-map-insdel.vcxproj create mode 100644 projects/Win/vc12/unit-misc.vcxproj create mode 100644 projects/Win/vc12/unit-misc.vcxproj.filters create mode 100644 projects/Win/vc12/unit-pqueue.vcxproj create mode 100644 projects/Win/vc12/unit-prerequisites.vcxproj create mode 100644 projects/Win/vc12/unit-queue.vcxproj create mode 100644 projects/Win/vc12/unit-set-delodd.vcxproj create mode 100644 projects/Win/vc12/unit-set-insdel.vcxproj create mode 100644 projects/Win/vc12/unit-stack.vcxproj create mode 100644 projects/Win/vc9/cds.ncb create mode 100644 projects/Win/vc9/cds.sln create mode 100644 projects/Win/vc9/cds.suo create mode 100644 projects/Win/vc9/cds.vcproj create mode 100644 projects/Win/vc9/cds.vcproj.user.u.user create mode 100644 projects/Win/vc9/hdr-test-deque.vcproj create mode 100644 projects/Win/vc9/hdr-test-deque.vcproj.user.u.user create mode 100644 projects/Win/vc9/hdr-test-map.vcproj create mode 100644 projects/Win/vc9/hdr-test-map.vcproj.user.u.user create mode 100644 projects/Win/vc9/hdr-test-misc.vcproj create mode 100644 projects/Win/vc9/hdr-test-misc.vcproj.user.u.user create mode 100644 projects/Win/vc9/hdr-test-ordered-list.vcproj create mode 100644 projects/Win/vc9/hdr-test-ordered-list.vcproj.user.u.user create mode 100644 projects/Win/vc9/hdr-test-priority_queue.vcproj create mode 100644 projects/Win/vc9/hdr-test-priority_queue.vcproj.user.u.user create mode 100644 projects/Win/vc9/hdr-test-project-template.vcproj create mode 100644 projects/Win/vc9/hdr-test-queue.vcproj create mode 100644 projects/Win/vc9/hdr-test-queue.vcproj.user.u.user create mode 100644 projects/Win/vc9/hdr-test-set.vcproj create mode 100644 projects/Win/vc9/hdr-test-set.vcproj.user.u.user create mode 100644 projects/Win/vc9/hdr-test-stack.vcproj create mode 100644 projects/Win/vc9/hdr-test-stack.vcproj.user.u.user create mode 100644 projects/Win/vc9/hdr-test-striped-set.vcproj 
create mode 100644 projects/Win/vc9/hdr-test-striped-set.vcproj.user.u.user create mode 100644 projects/Win/vc9/hdr-test-tree.vcproj create mode 100644 projects/Win/vc9/hdr-test-tree.vcproj.user.u.user create mode 100644 projects/Win/vc9/unit-map-delodd.vcproj create mode 100644 projects/Win/vc9/unit-map-delodd.vcproj.user.u.user create mode 100644 projects/Win/vc9/unit-map-find.vcproj create mode 100644 projects/Win/vc9/unit-map-find.vcproj.user.u.user create mode 100644 projects/Win/vc9/unit-map-insdel.vcproj create mode 100644 projects/Win/vc9/unit-map-insdel.vcproj.user.u.user create mode 100644 projects/Win/vc9/unit-misc.vcproj create mode 100644 projects/Win/vc9/unit-misc.vcproj.user.u.user create mode 100644 projects/Win/vc9/unit-pqueue.vcproj create mode 100644 projects/Win/vc9/unit-pqueue.vcproj.user.u.user create mode 100644 projects/Win/vc9/unit-prerequisites.vcproj create mode 100644 projects/Win/vc9/unit-prerequisites.vcproj.user.u.user create mode 100644 projects/Win/vc9/unit-project-template.vcproj create mode 100644 projects/Win/vc9/unit-queue.vcproj create mode 100644 projects/Win/vc9/unit-queue.vcproj.user.u.user create mode 100644 projects/Win/vc9/unit-set-delodd.vcproj create mode 100644 projects/Win/vc9/unit-set-delodd.vcproj.user.u.user create mode 100644 projects/Win/vc9/unit-set-insdel.vcproj create mode 100644 projects/Win/vc9/unit-set-insdel.vcproj.user.u.user create mode 100644 projects/Win/vc9/unit-stack.vcproj create mode 100644 projects/Win/vc9/unit-stack.vcproj.user.u.user create mode 100644 projects/android/jni/Android.mk create mode 100644 projects/android/jni/Application.mk create mode 100644 projects/android/jni/build.sh create mode 100644 projects/source.libcds.mk create mode 100644 projects/source.test-common.mk create mode 100644 projects/source.test-hdr.mk create mode 100644 projects/source.test-hdr.offsetof.mk create mode 100644 projects/source.unit.map.mk create mode 100644 projects/source.unit.misc.mk create mode 100644 projects/source.unit.pqueue.mk create mode 100644 projects/source.unit.queue.mk create mode 100644 projects/source.unit.set.mk create mode 100644 projects/source.unit.stack.mk create mode 100644 readme create mode 100644 scripts/tab2space.pl create mode 100644 src/dllmain.cpp create mode 100644 src/hrc_gc.cpp create mode 100644 src/hzp_const.h create mode 100644 src/hzp_gc.cpp create mode 100644 src/init.cpp create mode 100644 src/michael_heap.cpp create mode 100644 src/ptb_gc.cpp create mode 100644 src/topology_hpux.cpp create mode 100644 src/topology_linux.cpp create mode 100644 src/topology_osx.cpp create mode 100644 src/urcu_gp.cpp create mode 100644 src/urcu_sh.cpp create mode 100644 tests/cppunit/cppunit_mini.h create mode 100644 tests/cppunit/cppunit_proxy.h create mode 100644 tests/cppunit/file_reporter.h create mode 100644 tests/cppunit/test_beans.h create mode 100644 tests/cppunit/test_main.cpp create mode 100644 tests/cppunit/thread.cpp create mode 100644 tests/cppunit/thread.h create mode 100644 tests/data/split.pl create mode 100644 tests/data/test-debug.conf create mode 100644 tests/data/test-express.conf create mode 100644 tests/data/test.conf create mode 100644 tests/data/text.txt create mode 100644 tests/test-hdr/deque/hdr_deque.h create mode 100644 tests/test-hdr/deque/hdr_fcdeque.cpp create mode 100644 tests/test-hdr/deque/hdr_intrusive_deque.h create mode 100644 tests/test-hdr/deque/hdr_intrusive_michael_deque_hp.cpp create mode 100644 tests/test-hdr/deque/hdr_intrusive_michael_deque_ptb.cpp create mode 100644 
tests/test-hdr/deque/hdr_michael_deque_hp.cpp create mode 100644 tests/test-hdr/deque/hdr_michael_deque_ptb.cpp create mode 100644 tests/test-hdr/map/hdr_cuckoo_map.cpp create mode 100644 tests/test-hdr/map/hdr_cuckoo_map.h create mode 100644 tests/test-hdr/map/hdr_map.h create mode 100644 tests/test-hdr/map/hdr_michael_map_hp.cpp create mode 100644 tests/test-hdr/map/hdr_michael_map_hrc.cpp create mode 100644 tests/test-hdr/map/hdr_michael_map_lazy_hp.cpp create mode 100644 tests/test-hdr/map/hdr_michael_map_lazy_hrc.cpp create mode 100644 tests/test-hdr/map/hdr_michael_map_lazy_nogc.cpp create mode 100644 tests/test-hdr/map/hdr_michael_map_lazy_ptb.cpp create mode 100644 tests/test-hdr/map/hdr_michael_map_lazy_rcu_gpb.cpp create mode 100644 tests/test-hdr/map/hdr_michael_map_lazy_rcu_gpi.cpp create mode 100644 tests/test-hdr/map/hdr_michael_map_lazy_rcu_gpt.cpp create mode 100644 tests/test-hdr/map/hdr_michael_map_lazy_rcu_shb.cpp create mode 100644 tests/test-hdr/map/hdr_michael_map_lazy_rcu_sht.cpp create mode 100644 tests/test-hdr/map/hdr_michael_map_nogc.cpp create mode 100644 tests/test-hdr/map/hdr_michael_map_ptb.cpp create mode 100644 tests/test-hdr/map/hdr_michael_map_rcu_gpb.cpp create mode 100644 tests/test-hdr/map/hdr_michael_map_rcu_gpi.cpp create mode 100644 tests/test-hdr/map/hdr_michael_map_rcu_gpt.cpp create mode 100644 tests/test-hdr/map/hdr_michael_map_rcu_shb.cpp create mode 100644 tests/test-hdr/map/hdr_michael_map_rcu_sht.cpp create mode 100644 tests/test-hdr/map/hdr_refinable_hashmap_boost_flat_map.cpp create mode 100644 tests/test-hdr/map/hdr_refinable_hashmap_boost_list.cpp create mode 100644 tests/test-hdr/map/hdr_refinable_hashmap_boost_map.cpp create mode 100644 tests/test-hdr/map/hdr_refinable_hashmap_boost_unordered_map.cpp create mode 100644 tests/test-hdr/map/hdr_refinable_hashmap_hashmap_std.cpp create mode 100644 tests/test-hdr/map/hdr_refinable_hashmap_hashmap_vc.cpp create mode 100644 tests/test-hdr/map/hdr_refinable_hashmap_list.cpp create mode 100644 tests/test-hdr/map/hdr_refinable_hashmap_map.cpp create mode 100644 tests/test-hdr/map/hdr_refinable_hashmap_slist.cpp create mode 100644 tests/test-hdr/map/hdr_skiplist_map.h create mode 100644 tests/test-hdr/map/hdr_skiplist_map_hp.cpp create mode 100644 tests/test-hdr/map/hdr_skiplist_map_hrc.cpp create mode 100644 tests/test-hdr/map/hdr_skiplist_map_nogc.cpp create mode 100644 tests/test-hdr/map/hdr_skiplist_map_ptb.cpp create mode 100644 tests/test-hdr/map/hdr_skiplist_map_rcu.h create mode 100644 tests/test-hdr/map/hdr_skiplist_map_rcu_gpb.cpp create mode 100644 tests/test-hdr/map/hdr_skiplist_map_rcu_gpi.cpp create mode 100644 tests/test-hdr/map/hdr_skiplist_map_rcu_gpt.cpp create mode 100644 tests/test-hdr/map/hdr_skiplist_map_rcu_shb.cpp create mode 100644 tests/test-hdr/map/hdr_skiplist_map_rcu_sht.cpp create mode 100644 tests/test-hdr/map/hdr_splitlist_map_hp.cpp create mode 100644 tests/test-hdr/map/hdr_splitlist_map_hrc.cpp create mode 100644 tests/test-hdr/map/hdr_splitlist_map_lazy_hp.cpp create mode 100644 tests/test-hdr/map/hdr_splitlist_map_lazy_hrc.cpp create mode 100644 tests/test-hdr/map/hdr_splitlist_map_lazy_nogc.cpp create mode 100644 tests/test-hdr/map/hdr_splitlist_map_lazy_ptb.cpp create mode 100644 tests/test-hdr/map/hdr_splitlist_map_lazy_rcu_gpb.cpp create mode 100644 tests/test-hdr/map/hdr_splitlist_map_lazy_rcu_gpi.cpp create mode 100644 tests/test-hdr/map/hdr_splitlist_map_lazy_rcu_gpt.cpp create mode 100644 tests/test-hdr/map/hdr_splitlist_map_lazy_rcu_shb.cpp create mode 
100644 tests/test-hdr/map/hdr_splitlist_map_lazy_rcu_sht.cpp create mode 100644 tests/test-hdr/map/hdr_splitlist_map_nogc.cpp create mode 100644 tests/test-hdr/map/hdr_splitlist_map_ptb.cpp create mode 100644 tests/test-hdr/map/hdr_splitlist_map_rcu_gpb.cpp create mode 100644 tests/test-hdr/map/hdr_splitlist_map_rcu_gpi.cpp create mode 100644 tests/test-hdr/map/hdr_splitlist_map_rcu_gpt.cpp create mode 100644 tests/test-hdr/map/hdr_splitlist_map_rcu_shb.cpp create mode 100644 tests/test-hdr/map/hdr_splitlist_map_rcu_sht.cpp create mode 100644 tests/test-hdr/map/hdr_striped_hashmap_boost_flat_map.cpp create mode 100644 tests/test-hdr/map/hdr_striped_hashmap_boost_list.cpp create mode 100644 tests/test-hdr/map/hdr_striped_hashmap_boost_map.cpp create mode 100644 tests/test-hdr/map/hdr_striped_hashmap_boost_unordered_map.cpp create mode 100644 tests/test-hdr/map/hdr_striped_hashmap_hashmap_std.cpp create mode 100644 tests/test-hdr/map/hdr_striped_hashmap_hashmap_vc.cpp create mode 100644 tests/test-hdr/map/hdr_striped_hashmap_list.cpp create mode 100644 tests/test-hdr/map/hdr_striped_hashmap_map.cpp create mode 100644 tests/test-hdr/map/hdr_striped_hashmap_slist.cpp create mode 100644 tests/test-hdr/map/hdr_striped_map.h create mode 100644 tests/test-hdr/map/hdr_striped_map_reg.cpp create mode 100644 tests/test-hdr/map/print_skiplist_stat.h create mode 100644 tests/test-hdr/misc/allocator_test.cpp create mode 100644 tests/test-hdr/misc/bitop_st.cpp create mode 100644 tests/test-hdr/misc/cxx11_atomic_class.cpp create mode 100644 tests/test-hdr/misc/cxx11_atomic_func.cpp create mode 100644 tests/test-hdr/misc/cxx11_convert_memory_order.h create mode 100644 tests/test-hdr/misc/find_option.cpp create mode 100644 tests/test-hdr/misc/hash_tuple.cpp create mode 100644 tests/test-hdr/misc/michael_allocator.cpp create mode 100644 tests/test-hdr/misc/michael_allocator.h create mode 100644 tests/test-hdr/misc/permutation_generator.cpp create mode 100644 tests/test-hdr/misc/thread_init_fini.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_lazy.h create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_lazy_hp.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_lazy_hrc.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_lazy_nogc.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_lazy_ptb.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_lazy_rcu_gpb.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_lazy_rcu_gpi.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_lazy_rcu_gpt.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_lazy_rcu_shb.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_lazy_rcu_sht.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_michael.h create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_michael_hp.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_michael_hrc.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_michael_list_rcu_gpb.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_michael_list_rcu_gpi.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_michael_list_rcu_gpt.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_michael_list_rcu_shb.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_michael_list_rcu_sht.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_intrusive_michael_nogc.cpp create mode 100644 
tests/test-hdr/ordered_list/hdr_intrusive_michael_ptb.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_lazy.h create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_hp.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_hrc.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_kv.h create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_kv_hp.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_kv_hrc.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_kv_nogc.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_kv_ptb.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_kv_rcu_gpb.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_kv_rcu_gpi.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_kv_rcu_gpt.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_kv_rcu_shb.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_kv_rcu_sht.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_nogc.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_ptb.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_rcu_gpb.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_rcu_gpi.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_rcu_gpt.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_rcu_shb.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_lazy_rcu_sht.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_michael.h create mode 100644 tests/test-hdr/ordered_list/hdr_michael_hp.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_michael_hrc.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_michael_kv.h create mode 100644 tests/test-hdr/ordered_list/hdr_michael_kv_hp.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_michael_kv_hrc.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_michael_kv_nogc.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_michael_kv_ptb.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_michael_kv_rcu_gpb.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_michael_kv_rcu_gpi.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_michael_kv_rcu_gpt.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_michael_kv_rcu_shb.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_michael_kv_rcu_sht.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_michael_nogc.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_michael_ptb.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_michael_rcu_gpb.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_michael_rcu_gpi.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_michael_rcu_gpt.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_michael_rcu_shb.cpp create mode 100644 tests/test-hdr/ordered_list/hdr_michael_rcu_sht.cpp create mode 100644 tests/test-hdr/priority_queue/hdr_fcpqueue_boost_stable_vector.cpp create mode 100644 tests/test-hdr/priority_queue/hdr_fcpqueue_deque.cpp create mode 100644 tests/test-hdr/priority_queue/hdr_fcpqueue_vector.cpp create mode 100644 tests/test-hdr/priority_queue/hdr_intrusive_mspqueue_dyn.cpp create mode 100644 tests/test-hdr/priority_queue/hdr_intrusive_mspqueue_static.cpp create mode 100644 tests/test-hdr/priority_queue/hdr_intrusive_pqueue.h create mode 100644 tests/test-hdr/priority_queue/hdr_mspqueue_dyn.cpp create mode 100644 tests/test-hdr/priority_queue/hdr_mspqueue_static.cpp create mode 100644 tests/test-hdr/priority_queue/hdr_pqueue.h create mode 100644 tests/test-hdr/priority_queue/hdr_priority_queue_reg.cpp create mode 
100644 tests/test-hdr/queue/hdr_basketqueue_hrc.cpp create mode 100644 tests/test-hdr/queue/hdr_basketqueue_hzp.cpp create mode 100644 tests/test-hdr/queue/hdr_basketqueue_ptb.cpp create mode 100644 tests/test-hdr/queue/hdr_fcqueue.cpp create mode 100644 tests/test-hdr/queue/hdr_intrusive_basketqueue_hp.cpp create mode 100644 tests/test-hdr/queue/hdr_intrusive_basketqueue_hrc.cpp create mode 100644 tests/test-hdr/queue/hdr_intrusive_basketqueue_node.h create mode 100644 tests/test-hdr/queue/hdr_intrusive_basketqueue_ptb.cpp create mode 100644 tests/test-hdr/queue/hdr_intrusive_fcqueue.cpp create mode 100644 tests/test-hdr/queue/hdr_intrusive_moirqueue_hp.cpp create mode 100644 tests/test-hdr/queue/hdr_intrusive_moirqueue_hrc.cpp create mode 100644 tests/test-hdr/queue/hdr_intrusive_moirqueue_ptb.cpp create mode 100644 tests/test-hdr/queue/hdr_intrusive_msqueue.h create mode 100644 tests/test-hdr/queue/hdr_intrusive_msqueue_hp.cpp create mode 100644 tests/test-hdr/queue/hdr_intrusive_msqueue_hrc.cpp create mode 100644 tests/test-hdr/queue/hdr_intrusive_msqueue_ptb.cpp create mode 100644 tests/test-hdr/queue/hdr_intrusive_optimisticqueue_hp.cpp create mode 100644 tests/test-hdr/queue/hdr_intrusive_optimisticqueue_ptb.cpp create mode 100644 tests/test-hdr/queue/hdr_intrusive_segmented_queue.h create mode 100644 tests/test-hdr/queue/hdr_intrusive_segmented_queue_hp.cpp create mode 100644 tests/test-hdr/queue/hdr_intrusive_segmented_queue_ptb.cpp create mode 100644 tests/test-hdr/queue/hdr_intrusive_singlelink_node.h create mode 100644 tests/test-hdr/queue/hdr_intrusive_tsigas_cycle_queue.cpp create mode 100644 tests/test-hdr/queue/hdr_intrusive_vyukovmpmc_cycle_queue.cpp create mode 100644 tests/test-hdr/queue/hdr_moirqueue_hrc.cpp create mode 100644 tests/test-hdr/queue/hdr_moirqueue_hzp.cpp create mode 100644 tests/test-hdr/queue/hdr_moirqueue_ptb.cpp create mode 100644 tests/test-hdr/queue/hdr_msqueue_hrc.cpp create mode 100644 tests/test-hdr/queue/hdr_msqueue_hzp.cpp create mode 100644 tests/test-hdr/queue/hdr_msqueue_ptb.cpp create mode 100644 tests/test-hdr/queue/hdr_optimistic_hzp.cpp create mode 100644 tests/test-hdr/queue/hdr_optimistic_ptb.cpp create mode 100644 tests/test-hdr/queue/hdr_rwqueue.cpp create mode 100644 tests/test-hdr/queue/hdr_segmented_queue.h create mode 100644 tests/test-hdr/queue/hdr_segmented_queue_hp.cpp create mode 100644 tests/test-hdr/queue/hdr_segmented_queue_ptb.cpp create mode 100644 tests/test-hdr/queue/hdr_vyukov_mpmc_cyclic.cpp create mode 100644 tests/test-hdr/queue/queue_test_header.cpp create mode 100644 tests/test-hdr/queue/queue_test_header.h create mode 100644 tests/test-hdr/set/hdr_cuckoo_set.cpp create mode 100644 tests/test-hdr/set/hdr_cuckoo_set.h create mode 100644 tests/test-hdr/set/hdr_intrusive_cuckoo_refinable_set.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_cuckoo_set.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_cuckoo_set.h create mode 100644 tests/test-hdr/set/hdr_intrusive_michael_set_hp.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_michael_set_hp_lazy.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_michael_set_hrc.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_michael_set_hrc_lazy.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_michael_set_nogc.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_michael_set_nogc_lazy.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_michael_set_ptb.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_michael_set_ptb_lazy.cpp create 
mode 100644 tests/test-hdr/set/hdr_intrusive_michael_set_rcu_gpb.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_michael_set_rcu_gpb_lazy.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_michael_set_rcu_gpi.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_michael_set_rcu_gpi_lazy.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_michael_set_rcu_gpt.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_michael_set_rcu_gpt_lazy.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_michael_set_rcu_shb.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_michael_set_rcu_shb_lazy.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_michael_set_rcu_sht.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_michael_set_rcu_sht_lazy.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_refinable_hashset_avlset.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_refinable_hashset_list.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_refinable_hashset_set.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_refinable_hashset_sgset.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_refinable_hashset_slist.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_refinable_hashset_splayset.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_refinable_hashset_treapset.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_refinable_hashset_uset.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_set.h create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_hp.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_hp_member.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_hrc.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_nogc.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_nogc_member.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_ptb.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_ptb_member.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_rcu_gpb.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_rcu_gpb_member.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_rcu_gpi.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_rcu_gpi_member.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_rcu_gpt.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_rcu_gpt_member.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_rcu_shb.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_rcu_shb_member.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_rcu_sht.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_rcu_sht_member.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_set.h create mode 100644 tests/test-hdr/set/hdr_intrusive_skiplist_set_rcu.h create mode 100644 tests/test-hdr/set/hdr_intrusive_splitlist_set_hp.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_splitlist_set_hp_lazy.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_splitlist_set_hrc.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_splitlist_set_hrc_lazy.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_splitlist_set_nogc.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_splitlist_set_nogc_lazy.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_splitlist_set_ptb.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_splitlist_set_ptb_lazy.cpp create mode 100644 
tests/test-hdr/set/hdr_intrusive_splitlist_set_rcu_gpb.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_splitlist_set_rcu_gpb_lazy.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_splitlist_set_rcu_gpi.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_splitlist_set_rcu_gpi_lazy.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_splitlist_set_rcu_gpt.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_splitlist_set_rcu_gpt_lazy.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_splitlist_set_rcu_shb.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_splitlist_set_rcu_shb_lazy.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_splitlist_set_rcu_sht.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_splitlist_set_rcu_sht_lazy.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_striped_hashset_avlset.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_striped_hashset_list.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_striped_hashset_set.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_striped_hashset_sgset.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_striped_hashset_slist.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_striped_hashset_splayset.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_striped_hashset_treapset.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_striped_hashset_uset.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_striped_set.cpp create mode 100644 tests/test-hdr/set/hdr_intrusive_striped_set.h create mode 100644 tests/test-hdr/set/hdr_michael_set_hp.cpp create mode 100644 tests/test-hdr/set/hdr_michael_set_hrc.cpp create mode 100644 tests/test-hdr/set/hdr_michael_set_lazy_hp.cpp create mode 100644 tests/test-hdr/set/hdr_michael_set_lazy_hrc.cpp create mode 100644 tests/test-hdr/set/hdr_michael_set_lazy_nogc.cpp create mode 100644 tests/test-hdr/set/hdr_michael_set_lazy_ptb.cpp create mode 100644 tests/test-hdr/set/hdr_michael_set_lazy_rcu_gpb.cpp create mode 100644 tests/test-hdr/set/hdr_michael_set_lazy_rcu_gpi.cpp create mode 100644 tests/test-hdr/set/hdr_michael_set_lazy_rcu_gpt.cpp create mode 100644 tests/test-hdr/set/hdr_michael_set_lazy_rcu_shb.cpp create mode 100644 tests/test-hdr/set/hdr_michael_set_lazy_rcu_sht.cpp create mode 100644 tests/test-hdr/set/hdr_michael_set_nogc.cpp create mode 100644 tests/test-hdr/set/hdr_michael_set_ptb.cpp create mode 100644 tests/test-hdr/set/hdr_michael_set_rcu_gpb.cpp create mode 100644 tests/test-hdr/set/hdr_michael_set_rcu_gpi.cpp create mode 100644 tests/test-hdr/set/hdr_michael_set_rcu_gpt.cpp create mode 100644 tests/test-hdr/set/hdr_michael_set_rcu_shb.cpp create mode 100644 tests/test-hdr/set/hdr_michael_set_rcu_sht.cpp create mode 100644 tests/test-hdr/set/hdr_refinable_hashset_boost_flat_set.cpp create mode 100644 tests/test-hdr/set/hdr_refinable_hashset_boost_list.cpp create mode 100644 tests/test-hdr/set/hdr_refinable_hashset_boost_set.cpp create mode 100644 tests/test-hdr/set/hdr_refinable_hashset_boost_stable_vector.cpp create mode 100644 tests/test-hdr/set/hdr_refinable_hashset_boost_unordered_set.cpp create mode 100644 tests/test-hdr/set/hdr_refinable_hashset_boost_vector.cpp create mode 100644 tests/test-hdr/set/hdr_refinable_hashset_hashset_std.cpp create mode 100644 tests/test-hdr/set/hdr_refinable_hashset_hashset_vc.cpp create mode 100644 tests/test-hdr/set/hdr_refinable_hashset_list.cpp create mode 100644 tests/test-hdr/set/hdr_refinable_hashset_set.cpp create mode 100644 
tests/test-hdr/set/hdr_refinable_hashset_slist.cpp create mode 100644 tests/test-hdr/set/hdr_refinable_hashset_vector.cpp create mode 100644 tests/test-hdr/set/hdr_set.h create mode 100644 tests/test-hdr/set/hdr_skiplist_set.h create mode 100644 tests/test-hdr/set/hdr_skiplist_set_hp.cpp create mode 100644 tests/test-hdr/set/hdr_skiplist_set_hrc.cpp create mode 100644 tests/test-hdr/set/hdr_skiplist_set_nogc.cpp create mode 100644 tests/test-hdr/set/hdr_skiplist_set_ptb.cpp create mode 100644 tests/test-hdr/set/hdr_skiplist_set_rcu.h create mode 100644 tests/test-hdr/set/hdr_skiplist_set_rcu_gpb.cpp create mode 100644 tests/test-hdr/set/hdr_skiplist_set_rcu_gpi.cpp create mode 100644 tests/test-hdr/set/hdr_skiplist_set_rcu_gpt.cpp create mode 100644 tests/test-hdr/set/hdr_skiplist_set_rcu_shb.cpp create mode 100644 tests/test-hdr/set/hdr_skiplist_set_rcu_sht.cpp create mode 100644 tests/test-hdr/set/hdr_splitlist_set_hp.cpp create mode 100644 tests/test-hdr/set/hdr_splitlist_set_hrc.cpp create mode 100644 tests/test-hdr/set/hdr_splitlist_set_lazy_hp.cpp create mode 100644 tests/test-hdr/set/hdr_splitlist_set_lazy_hrc.cpp create mode 100644 tests/test-hdr/set/hdr_splitlist_set_lazy_nogc.cpp create mode 100644 tests/test-hdr/set/hdr_splitlist_set_lazy_ptb.cpp create mode 100644 tests/test-hdr/set/hdr_splitlist_set_lazy_rcu_gpb.cpp create mode 100644 tests/test-hdr/set/hdr_splitlist_set_lazy_rcu_gpi.cpp create mode 100644 tests/test-hdr/set/hdr_splitlist_set_lazy_rcu_gpt.cpp create mode 100644 tests/test-hdr/set/hdr_splitlist_set_lazy_rcu_shb.cpp create mode 100644 tests/test-hdr/set/hdr_splitlist_set_lazy_rcu_sht.cpp create mode 100644 tests/test-hdr/set/hdr_splitlist_set_nogc.cpp create mode 100644 tests/test-hdr/set/hdr_splitlist_set_ptb.cpp create mode 100644 tests/test-hdr/set/hdr_splitlist_set_rcu_gpb.cpp create mode 100644 tests/test-hdr/set/hdr_splitlist_set_rcu_gpi.cpp create mode 100644 tests/test-hdr/set/hdr_splitlist_set_rcu_gpt.cpp create mode 100644 tests/test-hdr/set/hdr_splitlist_set_rcu_shb.cpp create mode 100644 tests/test-hdr/set/hdr_splitlist_set_rcu_sht.cpp create mode 100644 tests/test-hdr/set/hdr_striped_hashset_boost_flat_set.cpp create mode 100644 tests/test-hdr/set/hdr_striped_hashset_boost_list.cpp create mode 100644 tests/test-hdr/set/hdr_striped_hashset_boost_set.cpp create mode 100644 tests/test-hdr/set/hdr_striped_hashset_boost_stable_vector.cpp create mode 100644 tests/test-hdr/set/hdr_striped_hashset_boost_unordered_set.cpp create mode 100644 tests/test-hdr/set/hdr_striped_hashset_boost_vector.cpp create mode 100644 tests/test-hdr/set/hdr_striped_hashset_hashset_std.cpp create mode 100644 tests/test-hdr/set/hdr_striped_hashset_hashset_vc.cpp create mode 100644 tests/test-hdr/set/hdr_striped_hashset_list.cpp create mode 100644 tests/test-hdr/set/hdr_striped_hashset_set.cpp create mode 100644 tests/test-hdr/set/hdr_striped_hashset_slist.cpp create mode 100644 tests/test-hdr/set/hdr_striped_hashset_vector.cpp create mode 100644 tests/test-hdr/set/hdr_striped_set.h create mode 100644 tests/test-hdr/set/intrusive_cuckoo_set_common.h create mode 100644 tests/test-hdr/size_check.h create mode 100644 tests/test-hdr/stack/hdr_elimination_stack_hp.cpp create mode 100644 tests/test-hdr/stack/hdr_elimination_stack_hrc.cpp create mode 100644 tests/test-hdr/stack/hdr_elimination_stack_ptb.cpp create mode 100644 tests/test-hdr/stack/hdr_fcstack.cpp create mode 100644 tests/test-hdr/stack/hdr_intrusive_elimination_stack_hp.cpp create mode 100644 
tests/test-hdr/stack/hdr_intrusive_elimination_stack_hrc.cpp create mode 100644 tests/test-hdr/stack/hdr_intrusive_elimination_stack_ptb.cpp create mode 100644 tests/test-hdr/stack/hdr_intrusive_fcstack.cpp create mode 100644 tests/test-hdr/stack/hdr_intrusive_treiber_stack.h create mode 100644 tests/test-hdr/stack/hdr_intrusive_treiber_stack_hp.cpp create mode 100644 tests/test-hdr/stack/hdr_intrusive_treiber_stack_hrc.cpp create mode 100644 tests/test-hdr/stack/hdr_intrusive_treiber_stack_ptb.cpp create mode 100644 tests/test-hdr/stack/hdr_treiber_stack.h create mode 100644 tests/test-hdr/stack/hdr_treiber_stack_hp.cpp create mode 100644 tests/test-hdr/stack/hdr_treiber_stack_hrc.cpp create mode 100644 tests/test-hdr/stack/hdr_treiber_stack_ptb.cpp create mode 100644 tests/test-hdr/tree/hdr_ellenbintree_map.h create mode 100644 tests/test-hdr/tree/hdr_ellenbintree_map_hp.cpp create mode 100644 tests/test-hdr/tree/hdr_ellenbintree_map_ptb.cpp create mode 100644 tests/test-hdr/tree/hdr_ellenbintree_map_rcu_gpb.cpp create mode 100644 tests/test-hdr/tree/hdr_ellenbintree_map_rcu_gpi.cpp create mode 100644 tests/test-hdr/tree/hdr_ellenbintree_map_rcu_gpt.cpp create mode 100644 tests/test-hdr/tree/hdr_ellenbintree_map_rcu_shb.cpp create mode 100644 tests/test-hdr/tree/hdr_ellenbintree_map_rcu_sht.cpp create mode 100644 tests/test-hdr/tree/hdr_ellenbintree_set.h create mode 100644 tests/test-hdr/tree/hdr_ellenbintree_set_hp.cpp create mode 100644 tests/test-hdr/tree/hdr_ellenbintree_set_ptb.cpp create mode 100644 tests/test-hdr/tree/hdr_ellenbintree_set_rcu_gpb.cpp create mode 100644 tests/test-hdr/tree/hdr_ellenbintree_set_rcu_gpi.cpp create mode 100644 tests/test-hdr/tree/hdr_ellenbintree_set_rcu_gpt.cpp create mode 100644 tests/test-hdr/tree/hdr_ellenbintree_set_rcu_shb.cpp create mode 100644 tests/test-hdr/tree/hdr_ellenbintree_set_rcu_sht.cpp create mode 100644 tests/test-hdr/tree/hdr_intrusive_bintree.h create mode 100644 tests/test-hdr/tree/hdr_intrusive_ellen_bintree_hp.cpp create mode 100644 tests/test-hdr/tree/hdr_intrusive_ellen_bintree_hp_member.cpp create mode 100644 tests/test-hdr/tree/hdr_intrusive_ellen_bintree_pool_hp.h create mode 100644 tests/test-hdr/tree/hdr_intrusive_ellen_bintree_pool_ptb.h create mode 100644 tests/test-hdr/tree/hdr_intrusive_ellen_bintree_pool_rcu.h create mode 100644 tests/test-hdr/tree/hdr_intrusive_ellen_bintree_ptb.cpp create mode 100644 tests/test-hdr/tree/hdr_intrusive_ellen_bintree_ptb_member.cpp create mode 100644 tests/test-hdr/tree/hdr_intrusive_ellen_bintree_rcu_gpb.cpp create mode 100644 tests/test-hdr/tree/hdr_intrusive_ellen_bintree_rcu_gpb_member.cpp create mode 100644 tests/test-hdr/tree/hdr_intrusive_ellen_bintree_rcu_gpi.cpp create mode 100644 tests/test-hdr/tree/hdr_intrusive_ellen_bintree_rcu_gpi_member.cpp create mode 100644 tests/test-hdr/tree/hdr_intrusive_ellen_bintree_rcu_gpt.cpp create mode 100644 tests/test-hdr/tree/hdr_intrusive_ellen_bintree_rcu_gpt_member.cpp create mode 100644 tests/test-hdr/tree/hdr_intrusive_ellen_bintree_rcu_shb.cpp create mode 100644 tests/test-hdr/tree/hdr_intrusive_ellen_bintree_rcu_shb_member.cpp create mode 100644 tests/test-hdr/tree/hdr_intrusive_ellen_bintree_rcu_sht.cpp create mode 100644 tests/test-hdr/tree/hdr_intrusive_ellen_bintree_rcu_sht_member.cpp create mode 100644 tests/test-hdr/tree/hdr_tree_reg.cpp create mode 100644 tests/unit/_template.cpp create mode 100644 tests/unit/alloc/hoard_threadtest.cpp create mode 100644 tests/unit/alloc/larson.cpp create mode 100644 
tests/unit/alloc/linux_scale.cpp create mode 100644 tests/unit/alloc/michael_allocator.cpp create mode 100644 tests/unit/alloc/michael_allocator.h create mode 100644 tests/unit/alloc/random.cpp create mode 100644 tests/unit/alloc/random_gen.h create mode 100644 tests/unit/ellen_bintree_update_desc_pool.cpp create mode 100644 tests/unit/ellen_bintree_update_desc_pool.h create mode 100644 tests/unit/lock/nolock.h create mode 100644 tests/unit/lock/spinlock.cpp create mode 100644 tests/unit/lock/win32_lock.h create mode 100644 tests/unit/map2/map_defs.h create mode 100644 tests/unit/map2/map_delodd.cpp create mode 100644 tests/unit/map2/map_find_int.cpp create mode 100644 tests/unit/map2/map_find_string.cpp create mode 100644 tests/unit/map2/map_insdel_func.cpp create mode 100644 tests/unit/map2/map_insdel_int.cpp create mode 100644 tests/unit/map2/map_insdel_item_int.cpp create mode 100644 tests/unit/map2/map_insdel_item_string.cpp create mode 100644 tests/unit/map2/map_insdel_string.cpp create mode 100644 tests/unit/map2/map_insdelfind.cpp create mode 100644 tests/unit/map2/map_insfind_int.cpp create mode 100644 tests/unit/map2/map_types.h create mode 100644 tests/unit/map2/std_hash_map.h create mode 100644 tests/unit/map2/std_hash_map_gcc.h create mode 100644 tests/unit/map2/std_hash_map_vc.h create mode 100644 tests/unit/map2/std_map.h create mode 100644 tests/unit/map2/std_map_gcc.h create mode 100644 tests/unit/map2/std_map_vc.h create mode 100644 tests/unit/michael_alloc.cpp create mode 100644 tests/unit/michael_alloc.h create mode 100644 tests/unit/nonconcurrent_iterator_sequence.h create mode 100644 tests/unit/pqueue/ellen_bintree_pqueue.h create mode 100644 tests/unit/pqueue/pop.cpp create mode 100644 tests/unit/pqueue/pqueue_defs.h create mode 100644 tests/unit/pqueue/pqueue_item.h create mode 100644 tests/unit/pqueue/pqueue_type.h create mode 100644 tests/unit/pqueue/push.cpp create mode 100644 tests/unit/pqueue/push_pop.cpp create mode 100644 tests/unit/pqueue/skiplist_pqueue.h create mode 100644 tests/unit/pqueue/std_pqueue.h create mode 100644 tests/unit/print_cuckoo_stat.h create mode 100644 tests/unit/print_deque_stat.h create mode 100644 tests/unit/print_ellenbintree_stat.h create mode 100644 tests/unit/print_mspriorityqueue_stat.h create mode 100644 tests/unit/print_segmentedqueue_stat.h create mode 100644 tests/unit/print_skip_list_stat.h create mode 100644 tests/unit/queue/intrusive_queue_defs.h create mode 100644 tests/unit/queue/intrusive_queue_reader_writer.cpp create mode 100644 tests/unit/queue/intrusive_queue_type.h create mode 100644 tests/unit/queue/queue_defs.h create mode 100644 tests/unit/queue/queue_pop.cpp create mode 100644 tests/unit/queue/queue_push.cpp create mode 100644 tests/unit/queue/queue_random.cpp create mode 100644 tests/unit/queue/queue_reader_writer.cpp create mode 100644 tests/unit/queue/queue_type.h create mode 100644 tests/unit/queue/std_queue.h create mode 100644 tests/unit/set2/set_defs.h create mode 100644 tests/unit/set2/set_delodd.cpp create mode 100644 tests/unit/set2/set_insdel_func.cpp create mode 100644 tests/unit/set2/set_insdel_func.h create mode 100644 tests/unit/set2/set_insdel_func2.cpp create mode 100644 tests/unit/set2/set_insdel_func3.cpp create mode 100644 tests/unit/set2/set_insdel_func4.cpp create mode 100644 tests/unit/set2/set_insdel_func5.cpp create mode 100644 tests/unit/set2/set_insdel_func6.cpp create mode 100644 tests/unit/set2/set_insdel_func7.cpp create mode 100644 tests/unit/set2/set_insdel_string.cpp create mode 
100644 tests/unit/set2/set_insdelfind.cpp create mode 100644 tests/unit/set2/set_types.h create mode 100644 tests/unit/set2/std_hash_set.h create mode 100644 tests/unit/set2/std_hash_set_std.h create mode 100644 tests/unit/set2/std_hash_set_vc9.h create mode 100644 tests/unit/set2/std_set.h create mode 100644 tests/unit/stack/intrusive_stack_defs.h create mode 100644 tests/unit/stack/intrusive_stack_type.h create mode 100644 tests/unit/stack/stack_defs.h create mode 100644 tests/unit/stack/stack_intrusive_pushpop.cpp create mode 100644 tests/unit/stack/stack_push.cpp create mode 100644 tests/unit/stack/stack_pushpop.cpp create mode 100644 tests/unit/stack/stack_type.h
diff --git a/brush_cds.pl b/brush_cds.pl
new file mode 100644
index 00000000..bd98cc05
--- /dev/null
+++ b/brush_cds.pl
@@ -0,0 +1,66 @@
+#!/usr/bin/perl
+
+my $nTabsFound = 0;
+
+brush() ;
+
+print "Tabs found: $nTabsFound\n" ;
+print "Done\n" ;
+
+exit ;
+
+sub brush()
+{
+    processDir( "./cds" ) ;
+    processDir( "./src" ) ;
+    processDir( "./tests/test-hdr" ) ;
+    processDir( "./tests/unit" ) ;
+    processDir( "./tests/cppunit" ) ;
+}
+
+sub processDir( $ )
+{
+    my $dirName = shift ;
+    print "Process $dirName directory...\n";
+
+    opendir(my $dh, $dirName) || die "can't opendir $dirName: $!";
+    my @files = grep { /^[^\.]/ } readdir($dh);
+    closedir $dh;
+
+    foreach my $file ( @files ) {
+        if ( -d "$dirName/$file" ) {
+            processDir("$dirName/$file") ;
+        }
+        elsif ( $file =~ /\.(h|cpp)$/ ) {
+            processFile( "$dirName/$file" ) ;
+        }
+    }
+}
+
+sub processFile( $ )
+{
+    my $file = shift ;
+
+
+    if ( open( my $fh, $file )) {
+        binmode $fh ;
+        my $str = '' ;
+        while (<$fh>) {
+            $nTabsFound += $_ =~ s/\t/ /g;
+            $_ =~ s/\s+$// ;
+            $_ =~ s/\s+;$/;/;
+            $str .= $_ ;
+            $str .= "\n" ;
+        }
+        close $fh ;
+
+        if ( open( my $fh, ">$file" )) {
+            binmode $fh ;
+            print $fh $str ;
+            close $fh ;
+        }
+    }
+}
+
+
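Usage note for brush_cds.pl (not part of the patch): the script takes no arguments, recursively walks the hard-coded ./cds, ./src, ./tests/test-hdr, ./tests/unit and ./tests/cppunit directories, and rewrites every .h/.cpp file in place, converting tabs to spaces and trimming trailing whitespace. Assuming the current directory is the top of the source tree, a minimal run is:

    perl brush_cds.pl    # prints "Tabs found: N" and "Done" when finished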
diff --git a/build/Makefile b/build/Makefile
new file mode 100644
index 00000000..313afc01
--- /dev/null
+++ b/build/Makefile
@@ -0,0 +1,272 @@
+# Makefile
+#
+
+.PHONY: all
+.SUFFIXES:.cpp .c .ro
+
+# make_distrib.pl substitutes the correct version
+VERSION=1.6.0
+
+# boost_thread lib used by test application.
+# You may change the library name
+LD_BOOST_THREAD_LIB=-lboost_thread -lboost_system
+
+ifeq ($(platform),mingw)
+    LD_TEST_COMMON_LIBS=-lcds -lrtm
+    LD_TEST_COMMON_DEBUG_LIBS=-lcds-debug -lrtm
+    BASE_OPT = -D_REENTRANT -D_FILE_OFFSET_BITS=64 -I..
+    # -Wa,--hash-size=2048
+    CPP_BUILD_CDS_OPT=-DCDS_BUILD_LIB
+else
+ifeq ($(platform),darwin)
+    LD_TEST_COMMON_LIBS=-lcds
+    LD_TEST_COMMON_DEBUG_LIBS=-lcds-debug
+else
+    LD_TEST_COMMON_LIBS=-lcds -lpthread -lrt
+    LD_TEST_COMMON_DEBUG_LIBS=-lcds-debug -lpthread -lrt
+endif
+    BASE_OPT = -D_REENTRANT -D_POSIX_PTHREAD_SEMANTICS -D_FILE_OFFSET_BITS=64 -I..
+    CPP_BUILD_CDS_OPT=
+endif
+
+COMP_OPT = -c $(CFLAGS) $(BASE_OPT)
+CPP_COMP_OPT = -std=c++0x -c $(CXXFLAGS) $(BASE_OPT)
+
+COMPILER_ROOT = $(shell dirname `dirname \`which $(CXX)\``)
+
+LD_OPTS = $(LDFLAGS)
+
+
+
+####################################
+# cds library
+
+include ../projects/source.libcds.mk
+CDS_OBJS := $(addprefix $(OBJ_PATH)/,$(notdir $(CDS_SOURCES)))
+CDS_OBJS :=$(CDS_OBJS:%.cpp=%.o)
+CDS_SOURCES := $(CDS_SOURCES:%.cpp=../%.cpp)
+
+ifeq ($(platform),mingw)
+    CDS_DEBUG_LIB=libcds-debug.dll
+    CDS_RELEASE_LIB=libcds.dll
+else
+ifeq ($(platform),darwin)
+    CDS_DEBUG_LIB=libcds-debug.dylib
+    CDS_RELEASE_LIB=libcds.dylib
+else
+    CDS_DEBUG_LIB=libcds-debug.so
+    CDS_RELEASE_LIB=libcds.so
+endif
+endif
+
+$(CDS_OBJS): $(OBJ_PATH)/%.o: ../src/%.cpp
+	$(CXX) $(CPP_COMP_OPT) $(CPP_BUILD_CDS_OPT) -o $@ $<
+
+ifeq ($(platform),mingw)
+$(CDS_DEBUG_LIB): $(BIN_PATH)/$(CDS_DEBUG_LIB)
+
+$(BIN_PATH)/$(CDS_DEBUG_LIB) : $(CDS_OBJS)
+	$(CXX) $(LD_OPTS) -Wl,--out-implib,$(BIN_PATH)/$(CDS_DEBUG_LIB).a $(CDS_OBJS) -o $@
+
+$(CDS_RELEASE_LIB) : $(BIN_PATH)/$(CDS_RELEASE_LIB)
+
+$(BIN_PATH)/$(CDS_RELEASE_LIB) : $(CDS_OBJS)
+	$(CXX) $(LD_OPTS) $(CDS_OBJS) -Wl,--out-implib,$(BIN_PATH)/$(CDS_RELEASE_LIB).a -o $@
+
+debug : $(CDS_DEBUG_LIB)
+release : $(CDS_RELEASE_LIB)
+else
+$(CDS_DEBUG_LIB).$(VERSION) : $(CDS_OBJS)
+	$(CXX) $(LD_OPTS) $(CDS_PLATFORM_RELEASE_LDFLAGS) $(CDS_OBJS) -o $@
+	mv ./$(CDS_DEBUG_LIB).$(VERSION) $(BIN_PATH)/$(CDS_DEBUG_LIB).$(VERSION)
+	ln -sf $(CDS_DEBUG_LIB).$(VERSION) $(BIN_PATH)/$(CDS_DEBUG_LIB)
+
+$(CDS_RELEASE_LIB).$(VERSION) : $(CDS_OBJS)
+	$(CXX) $(LD_OPTS) $(CDS_OBJS) $(CDS_PLATFORM_DEBUG_LDFLAGS) -o $@
+	mv ./$(CDS_RELEASE_LIB).$(VERSION) $(BIN_PATH)/$(CDS_RELEASE_LIB).$(VERSION)
+	ln -sf $(CDS_RELEASE_LIB).$(VERSION) $(BIN_PATH)/$(CDS_RELEASE_LIB)
+
+debug : $(CDS_DEBUG_LIB).$(VERSION)
+release : $(CDS_RELEASE_LIB).$(VERSION)
+
+endif
+
+all: debug release
+
+##########################################
+# Make tests
+
+OBJ_TEST_PATH=$(OBJ_PATH)
+
+include ../projects/source.test-common.mk
+CDS_TESTCOMMON_SOURCES := $(CDS_TESTCOMMON_SOURCES:%.cpp=../%.cpp)
+TEST_COMMON_OBJS := $(CDS_TESTCOMMON_SOURCES:%.cpp=%.o)
+
+TEST_COMMONHDR_SRC_DIR=../tests
+$(TEST_COMMON_OBJS) : %.o : %.cpp
+	$(CXX) $(CPP_COMP_OPT) -I$(TEST_COMMONHDR_SRC_DIR) $< -o $@
+
+
+include ../projects/source.test-hdr.mk
+CDS_TESTHDR_SOURCES := $(CDS_TESTHDR_SOURCES:%.cpp=../%.cpp)
+TESTHDR_OBJS := $(CDS_TESTHDR_SOURCES:%.cpp=%.o)
+
+TESTHDR_SRC_DIR=../tests/test-hdr
+$(TESTHDR_OBJS): %.o: %.cpp
+	$(CXX) $(CPP_COMP_OPT) -I$(TESTHDR_SRC_DIR) -I$(TEST_COMMONHDR_SRC_DIR) $< -o $@
+
+include ../projects/source.test-hdr.offsetof.mk
+CDS_TESTHDR_OFFSETOF_SOURCES := $(CDS_TESTHDR_OFFSETOF_SOURCES:%.cpp=../%.cpp)
+TESTHDR_OBJS_NO_OFFSETOF_WARN := $(CDS_TESTHDR_OFFSETOF_SOURCES:%.cpp=%.o)
+
+$(TESTHDR_OBJS_NO_OFFSETOF_WARN): %.o: %.cpp
+	$(CXX) $(CPP_COMP_OPT) -I$(TESTHDR_SRC_DIR) -I$(TEST_COMMONHDR_SRC_DIR) -Wno-invalid-offsetof $< -o $@
+
+
+TEST_SRC_DIR=../tests/unit
+TEST_DATA_DIR=`pwd`/../tests/data
+
+CDSUNIT_COMMON_FILE=
+
+include ../projects/source.unit.map.mk
+CDSUNIT_MAP_SOURCES := $(CDSUNIT_MAP_SOURCES:%.cpp=../%.cpp)
+CDSUNIT_MAP_FILE := $(CDSUNIT_MAP_SOURCES:%.cpp=%.o)
+
+include ../projects/source.unit.set.mk
+CDSUNIT_SET_SOURCES := $(CDSUNIT_SET_SOURCES:%.cpp=../%.cpp)
+CDSUNIT_SET_FILE := $(CDSUNIT_SET_SOURCES:%.cpp=%.o)
+
+include ../projects/source.unit.queue.mk
+CDSUNIT_QUEUE_SOURCES := $(CDSUNIT_QUEUE_SOURCES:%.cpp=../%.cpp)
+CDSUNIT_QUEUE_FILE := $(CDSUNIT_QUEUE_SOURCES:%.cpp=%.o)
+
+include ../projects/source.unit.pqueue.mk
+CDSUNIT_PQUEUE_SOURCES := $(CDSUNIT_PQUEUE_SOURCES:%.cpp=../%.cpp)
+CDSUNIT_PQUEUE_FILE := $(CDSUNIT_PQUEUE_SOURCES:%.cpp=%.o)
+
+include ../projects/source.unit.stack.mk
+CDSUNIT_STACK_SOURCES := $(CDSUNIT_STACK_SOURCES:%.cpp=../%.cpp)
+CDSUNIT_STACK_FILE := $(CDSUNIT_STACK_SOURCES:%.cpp=%.o)
+
+include ../projects/source.unit.misc.mk
+CDSUNIT_MISC_SOURCES := $(CDSUNIT_MISC_SOURCES:%.cpp=../%.cpp)
+CDSUNIT_MISC_FILE := $(CDSUNIT_MISC_SOURCES:%.cpp=%.o)
+
+TEST_OBJ_FILE= $(CDSUNIT_COMMON_FILE) $(CDSUNIT_MAP_FILE) $(CDSUNIT_SET_FILE) $(CDSUNIT_QUEUE_FILE) $(CDSUNIT_PQUEUE_FILE) \
+    $(CDSUNIT_STACK_FILE) $(CDSUNIT_MISC_FILE)
+
+$(TEST_OBJ_FILE): %.o: %.cpp
+	$(CXX) $(CPP_COMP_OPT) -I$(TEST_SRC_DIR) -I$(TEST_COMMONHDR_SRC_DIR) $< -o $@
+
+CDSUNIT_MAP_EXE=$(BIN_PATH)/cdsu-map
+CDSUNIT_SET_EXE=$(BIN_PATH)/cdsu-set
+CDSUNIT_QUEUE_EXE=$(BIN_PATH)/cdsu-queue
+CDSUNIT_PQUEUE_EXE=$(BIN_PATH)/cdsu-pqueue
+CDSUNIT_STACK_EXE=$(BIN_PATH)/cdsu-stack
+CDSUNIT_MISC_EXE=$(BIN_PATH)/cdsu-misc
+CDSUNIT_EXE_FILES= $(CDSUNIT_MAP_EXE) $(CDSUNIT_SET_EXE) $(CDSUNIT_QUEUE_EXE) $(CDSUNIT_PQUEUE_EXE) $(CDSUNIT_STACK_EXE) $(CDSUNIT_MISC_EXE)
+
+unit-map: $(CDSUNIT_MAP_EXE)
+unit-set: $(CDSUNIT_SET_EXE)
+unit-queue: $(CDSUNIT_QUEUE_EXE)
+unit-pqueue: $(CDSUNIT_PQUEUE_EXE)
+unit-stack: $(CDSUNIT_STACK_EXE)
+
+ifeq ($(platform),mingw)
+make_test : $(BIN_PATH)/test-hdr $(CDSUNIT_EXE_FILES)
+	cd $(TEST_DATA_DIR); perl -X split.pl
+	cp -f $(TEST_DATA_DIR)/test.conf $(TEST_DATA_DIR)/test-debug.conf $(TEST_DATA_DIR)/dictionary.txt $(BIN_PATH)
+else
+make_test : $(BIN_PATH)/test-hdr $(CDSUNIT_EXE_FILES)
+	cd $(TEST_DATA_DIR); perl -X split.pl
+	ln -sf $(TEST_DATA_DIR)/test.conf $(TEST_DATA_DIR)/test-debug.conf $(TEST_DATA_DIR)/dictionary.txt $(BIN_PATH)
+endif
+
+$(BIN_PATH)/test-hdr : $(TEST_COMMON_OBJS) $(TESTHDR_OBJS) $(TESTHDR_OBJS_NO_OFFSETOF_WARN)
+	$(CXX) $(LD_OPTS) -L$(BIN_PATH) \
+	    $(TESTHDR_OBJS) $(TESTHDR_OBJS_NO_OFFSETOF_WARN) $(TEST_COMMON_OBJS) \
+	    $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_LIBS) -o $@
+
+$(CDSUNIT_MAP_EXE) : $(CDSUNIT_MAP_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS)
+	$(CXX) $(LD_OPTS) -L$(BIN_PATH) $(CDSUNIT_MAP_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_LIBS) -o $@
+
+$(CDSUNIT_SET_EXE) : $(CDSUNIT_SET_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS)
+	$(CXX) $(LD_OPTS) -L$(BIN_PATH) $(CDSUNIT_SET_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_LIBS) -o $@
+
+$(CDSUNIT_QUEUE_EXE) : $(CDSUNIT_QUEUE_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS)
+	$(CXX) $(LD_OPTS) -L$(BIN_PATH) $(CDSUNIT_QUEUE_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_LIBS) -o $@
+
+$(CDSUNIT_PQUEUE_EXE) : $(CDSUNIT_PQUEUE_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS)
+	$(CXX) $(LD_OPTS) -L$(BIN_PATH) $(CDSUNIT_PQUEUE_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_LIBS) -o $@
+
+$(CDSUNIT_STACK_EXE) : $(CDSUNIT_STACK_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS)
+	$(CXX) $(LD_OPTS) -L$(BIN_PATH) $(CDSUNIT_STACK_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_LIBS) -o $@
+
+$(CDSUNIT_MISC_EXE) : $(CDSUNIT_MISC_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS)
+	$(CXX) $(LD_OPTS) -L$(BIN_PATH) $(CDSUNIT_MISC_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_LIBS) -o $@
+
+
+CDSUNIT_MAP_EXE_DBG=$(CDSUNIT_MAP_EXE)-d
+CDSUNIT_SET_EXE_DBG=$(CDSUNIT_SET_EXE)-d
+CDSUNIT_QUEUE_EXE_DBG=$(CDSUNIT_QUEUE_EXE)-d
+CDSUNIT_PQUEUE_EXE_DBG=$(CDSUNIT_PQUEUE_EXE)-d
+CDSUNIT_STACK_EXE_DBG=$(CDSUNIT_STACK_EXE)-d
+CDSUNIT_MISC_EXE_DBG=$(CDSUNIT_MISC_EXE)-d
+CDSUNIT_EXE_DBG_FILES= $(CDSUNIT_MAP_EXE_DBG) $(CDSUNIT_SET_EXE_DBG) $(CDSUNIT_QUEUE_EXE_DBG) $(CDSUNIT_PQUEUE_EXE_DBG) \
+    $(CDSUNIT_STACK_EXE_DBG) $(CDSUNIT_MISC_EXE_DBG)
+
+unit-map-dbg: $(CDSUNIT_MAP_EXE_DBG)
+unit-set-dbg: $(CDSUNIT_SET_EXE_DBG)
+unit-queue-dbg: $(CDSUNIT_QUEUE_EXE_DBG)
+unit-pqueue-dbg: $(CDSUNIT_PQUEUE_EXE_DBG)
+unit-stack-dbg: $(CDSUNIT_STACK_EXE_DBG)
+
+ifeq ($(platform),mingw)
+make_debug_test : $(BIN_PATH)/test-hdr-debug $(CDSUNIT_EXE_DBG_FILES)
+	cd $(TEST_DATA_DIR); perl -X split.pl
+	cp -f $(TEST_DATA_DIR)/test.conf $(TEST_DATA_DIR)/test-debug.conf $(TEST_DATA_DIR)/dictionary.txt $(BIN_PATH)
+else
+make_debug_test : $(BIN_PATH)/test-hdr-debug $(CDSUNIT_EXE_DBG_FILES)
+	cd $(TEST_DATA_DIR); perl -X split.pl
+	ln -sf $(TEST_DATA_DIR)/test.conf $(TEST_DATA_DIR)/test-debug.conf $(TEST_DATA_DIR)/dictionary.txt $(BIN_PATH)
+endif
+
+$(BIN_PATH)/test-hdr-debug : $(TESTHDR_OBJS) $(TESTHDR_OBJS_NO_OFFSETOF_WARN) $(TEST_COMMON_OBJS)
+	$(CXX) $(LD_OPTS) -L$(BIN_PATH) \
+	    $(TESTHDR_OBJS) $(TESTHDR_OBJS_NO_OFFSETOF_WARN) $(TEST_COMMON_OBJS) \
+	    $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_DEBUG_LIBS) -o $@
+
+$(CDSUNIT_MAP_EXE_DBG) : $(CDSUNIT_MAP_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS)
+	$(CXX) $(LD_OPTS) -L$(BIN_PATH) $(CDSUNIT_MAP_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_DEBUG_LIBS) -o $@
+
+$(CDSUNIT_SET_EXE_DBG) : $(CDSUNIT_SET_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS)
+	$(CXX) $(LD_OPTS) -L$(BIN_PATH) $(CDSUNIT_SET_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_DEBUG_LIBS) -o $@
+
+$(CDSUNIT_QUEUE_EXE_DBG) : $(CDSUNIT_QUEUE_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS)
+	$(CXX) $(LD_OPTS) -L$(BIN_PATH) $(CDSUNIT_QUEUE_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_DEBUG_LIBS) -o $@
+
+$(CDSUNIT_PQUEUE_EXE_DBG) : $(CDSUNIT_PQUEUE_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS)
+	$(CXX) $(LD_OPTS) -L$(BIN_PATH) $(CDSUNIT_PQUEUE_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_DEBUG_LIBS) -o $@
+
+$(CDSUNIT_STACK_EXE_DBG) : $(CDSUNIT_STACK_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS)
+	$(CXX) $(LD_OPTS) -L$(BIN_PATH) $(CDSUNIT_STACK_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_DEBUG_LIBS) -o $@
+
+$(CDSUNIT_MISC_EXE_DBG) : $(CDSUNIT_MISC_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS)
+	$(CXX) $(LD_OPTS) -L$(BIN_PATH) $(CDSUNIT_MISC_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_DEBUG_LIBS) -o $@
+
+test: make_test
+test_debug: make_debug_test
+
+test_hdr: $(BIN_PATH)/test-hdr
+test_hdr_debug: $(BIN_PATH)/test-hdr-debug
+
+##########################################
+#
+clean:
+	rm -f $(OBJ_PATH)/debug/*
+	rm -f $(OBJ_PATH)/release/*
+	rm -f $(TEST_COMMON_OBJS) $(TESTHDR_OBJS) $(TESTHDR_OBJS_NO_OFFSETOF_WARN) $(TEST_OBJ_FILE)
+	rm -f $(BIN_PATH)/libcds*
+	rm -f $(BIN_PATH)/cdsu-*
+	rm -f $(BIN_PATH)/test-hdr
+
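Usage note for build/Makefile (not part of the patch): the Makefile does not appear to be meant for direct, bare invocation; the platform, OBJ_PATH and BIN_PATH variables, as well as the compiler and linker flags it references, are expected to be computed and exported by build/build.sh, whose diff follows. A rough manual invocation, with entirely made-up paths and flags, might look like:

    cd build
    mkdir -p /tmp/cds-obj /tmp/cds-bin
    make platform=linux OBJ_PATH=/tmp/cds-obj BIN_PATH=/tmp/cds-bin \
         CXXFLAGS="-O2 -fPIC" LDFLAGS="-shared -fPIC" release
    # other targets defined above: debug, all, test, test_debug, test_hdr, clean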
+CDSUNIT_SET_EXE_DBG=$(CDSUNIT_SET_EXE)-d +CDSUNIT_QUEUE_EXE_DBG=$(CDSUNIT_QUEUE_EXE)-d +CDSUNIT_PQUEUE_EXE_DBG=$(CDSUNIT_PQUEUE_EXE)-d +CDSUNIT_STACK_EXE_DBG=$(CDSUNIT_STACK_EXE)-d +CDSUNIT_MISC_EXE_DBG=$(CDSUNIT_MISC_EXE)-d +CDSUNIT_EXE_DBG_FILES= $(CDSUNIT_MAP_EXE_DBG) $(CDSUNIT_SET_EXE_DBG) $(CDSUNIT_QUEUE_EXE_DBG) $(CDSUNIT_PQUEUE_EXE_DBG) \ + $(CDSUNIT_STACK_EXE_DBG) $(CDSUNIT_MISC_EXE_DBG) + +unit-map-dbg: $(CDSUNIT_MAP_EXE_DBG) +unit-set-dbg: $(CDSUNIT_SET_EXE_DBG) +unit-queue-dbg: $(CDSUNIT_QUEUE_EXE_DBG) +unit-pqueue-dbg: $(CDSUNIT_PQUEUE_EXE_DBG) +unit-stack-dbg: $(CDSUNIT_STACK_EXE_DBG) + +ifeq ($(platform),mingw) +make_debug_test : $(BIN_PATH)/test-hdr-debug $(CDSUNIT_EXE_DBG_FILES) + cd $(TEST_DATA_DIR); perl -X split.pl + cp -f $(TEST_DATA_DIR)/test.conf $(TEST_DATA_DIR)/test-debug.conf $(TEST_DATA_DIR)/dictionary.txt $(BIN_PATH) +else +make_debug_test : $(BIN_PATH)/test-hdr-debug $(CDSUNIT_EXE_DBG_FILES) + cd $(TEST_DATA_DIR); perl -X split.pl + ln -sf $(TEST_DATA_DIR)/test.conf $(TEST_DATA_DIR)/test-debug.conf $(TEST_DATA_DIR)/dictionary.txt $(BIN_PATH) +endif + +$(BIN_PATH)/test-hdr-debug : $(TESTHDR_OBJS) $(TESTHDR_OBJS_NO_OFFSETOF_WARN) $(TEST_COMMON_OBJS) + $(CXX) $(LD_OPTS) -L$(BIN_PATH) \ + $(TESTHDR_OBJS) $(TESTHDR_OBJS_NO_OFFSETOF_WARN) $(TEST_COMMON_OBJS) \ + $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_DEBUG_LIBS) -o $@ + +$(CDSUNIT_MAP_EXE_DBG) : $(CDSUNIT_MAP_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) + $(CXX) $(LD_OPTS) -L$(BIN_PATH) $(CDSUNIT_MAP_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_DEBUG_LIBS) -o $@ + +$(CDSUNIT_SET_EXE_DBG) : $(CDSUNIT_SET_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) + $(CXX) $(LD_OPTS) -L$(BIN_PATH) $(CDSUNIT_SET_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_DEBUG_LIBS) -o $@ + +$(CDSUNIT_QUEUE_EXE_DBG) : $(CDSUNIT_QUEUE_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) + $(CXX) $(LD_OPTS) -L$(BIN_PATH) $(CDSUNIT_QUEUE_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_DEBUG_LIBS) -o $@ + +$(CDSUNIT_PQUEUE_EXE_DBG) : $(CDSUNIT_PQUEUE_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) + $(CXX) $(LD_OPTS) -L$(BIN_PATH) $(CDSUNIT_PQUEUE_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_DEBUG_LIBS) -o $@ + +$(CDSUNIT_STACK_EXE_DBG) : $(CDSUNIT_STACK_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) + $(CXX) $(LD_OPTS) -L$(BIN_PATH) $(CDSUNIT_STACK_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_DEBUG_LIBS) -o $@ + +$(CDSUNIT_MISC_EXE_DBG) : $(CDSUNIT_MISC_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) + $(CXX) $(LD_OPTS) -L$(BIN_PATH) $(CDSUNIT_MISC_FILE) $(CDSUNIT_COMMON_FILE) $(TEST_COMMON_OBJS) $(LD_BOOST_THREAD_LIB) $(LD_TEST_COMMON_DEBUG_LIBS) -o $@ + +test: make_test +test_debug: make_debug_test + +test_hdr: $(BIN_PATH)/test-hdr +test_hdr_debug: $(BIN_PATH)/test-hdr-debug + +########################################## +# +clean: + rm -f $(OBJ_PATH)/debug/* + rm -f $(OBJ_PATH)/release/* + rm -f $(TEST_COMMON_OBJS) $(TESTHDR_OBJS) $(TESTHDR_OBJS_NO_OFFSETOF_WARN) $(TEST_OBJ_FILE) + rm -f $(BIN_PATH)/libcds* + rm -f $(BIN_PATH)/cdsu-* + rm -f $(BIN_PATH)/test-hdr + diff --git a/build/build.sh b/build/build.sh new file mode 100644 index 00000000..8c14e716 --- /dev/null +++ b/build/build.sh @@ -0,0 +1,555 @@ +#!/bin/sh + +# cds library build script +# Maxim Khiszinsky 04.01.2009 + +# The following variables are defined and exported 
at the end of this script. +# +# LDFLAGS +# CFLAGS +# CXXFLAGS +# CXX +# CC +# BITSTOBUILD + +usage() +{ + echo "Build helper script for one of the supported platforms" + echo "Usage: build.sh \"options\"" + echo " where options may be any of the following:" + echo " -t make target" + echo " -c Possible values are: gcc,clang" + echo " -x (e.g. g++, CC)" + echo " -p Possible values are:" + echo " x86, amd64 (x86_64), sparc, ia64" + echo " -o Possible values are:" + echo " linux, sunos (solaris), hpux, darwin" + echo " -D define" + echo " -b (accepts '64', '32')" + echo " -l " + echo " -z " + echo " -j " + echo " -h (to get help on the above commands)" + echo " --with-boost " + echo " --debug-cxx-options " + echo " --debug-ld-options " + echo " --release-cxx-options " + echo " --release-ld-options " + echo " --clean clean all before building" + echo " --debug-test make unit test in debug mode" + echo " --amd64-use-128bit use 128bit (16byte) CAS on amd64" + echo " --arch-tune march flag (only for x86/amd64), default = native" + echo " --nodefaultlibs - no default libs (pthread, stdc++)" + echo " --optimize-flags - optimization level flags for release target, default -O3" +} + +ERROR_EXIT_CODE=1 + +MAKE=make + +# Set up the default values for each parameter +debug=off # by default debug is off +bitsToBuild=0 # unknown +makejobs=2 +cppcompiler=g++ +ccompiler=gcc +processor_arch=unknown +OS_FAMILY=unknown +ArchFlag=native +ld_nodefaultlibs=off + +BOOST_INCLUDE_PATH= +makeclean=off + +MAKE_DEBUG_TEST=0 +ld_libs="-lpthread -ldl -lstdc++" + +cxx_debug_options= +ld_debug_options= + +cxx_release_options= +ld_release_options= + +cxx_test_release_options= +ls_test_release_options= + +cxx_release_optimization="-fno-strict-aliasing" +cxx_release_optimization_level="-O3" + +amd64_cxx_options= + +OS_VERSION= +TOOLSET_SUFFIX= + +target=test + +while [ $# -gt 0 ] + do + case $1 in + -t) + target=$2 + shift 2 + ;; + -c) + ccompiler=$2 + shift 2 + ;; + -x) + cppcompiler=$2 + shift 2 + ;; + -o) + OS_FAMILY=$2 + shift 2 + ;; + -p) + processor_arch=$2; shift 2 + ;; + -b) + bitsToBuild=$2 + shift 2 + ;; + -l) + linkeroptions="$linkeroptions $2" + shift 2 + ;; + -z) + compileroptions="$compileroptions $2" + shift 2 + ;; + -j) + makejobs=$2 + shift 2 + ;; + -h) + usage + exit $ERROR_EXIT_CODE;; + + --clean) + makeclean=on + shift + ;; + --with-boost) + BOOST_INCLUDE_PATH=$2 + shift 2 + ;; + --debug-cxx-options) + cxx_debug_options=$2 + shift 2 + ;; + --debug-ld-options) + ld_debug_options=$2 + shift 2 + ;; + --release-cxx-options) + cxx_release_options=$2 + shift 2 + ;; + --optimize-flags) + cxx_release_optimization_level=$2 + shift 2 + ;; + --release-ld-options) + ld_release_options=$2 + shift 2 + ;; + --nodefaultlibs) + ld_libs=" " + shift + ;; + --with-make) + MAKE=$2 + shift 2 + ;; + --platform-suffix) + OS_VERSION=$2 + shift 2 + ;; + --toolset-suffix) + TOOLSET_SUFFIX=$2 + shift 2 + ;; + --debug-test) + MAKE_DEBUG_TEST=1 + if test $target = 'test'; then + target=test_debug + fi + shift 1 + ;; + --amd64-use-128bit) + amd64_cxx_options='-mcx16' + shift 1 + ;; + --arch-tune) + ArchFlag=$2 + shift 2 + ;; + --) + shift; break;; + + *) + echo "unknown option $1" + usage + exit $ERROR_EXIT_CODE;; + esac +done + +cxx_release_optimization="$cxx_release_optimization_level $cxx_release_optimization" + +# Determine compiler +case $ccompiler in + gcc) + if test $cppcompiler = ''; then + cppcompiler=g++ + fi + ;; + clang) + if test $cppcompiler = ''; then + cppcompiler=clang++ + fi + ;; + *) + echo "ERROR: Unknown compiler: 
$ccompiler" + exit $ERROR_EXIT_CODE + ;; +esac + +# Determine OS family +if test $OS_FAMILY = 'unknown'; then + OS_FAMILY=`uname |tr [A-Z] [a-z]|sed "s/-//"` +fi +case $OS_FAMILY in + hp-ux) + OS_FAMILY=hpux + ;; + solaris) + OS_FAMILY=sunos + ;; + mingw*) + OS_FAMILY=mingw + ;; + linux|sunos|hpux|aix|freebsd|mingw|darwin) + ;; + *) + echo "Warning: Unknown operation system: $OS_FAMILY" + #exit $ERROR_EXIT_CODE + ;; +esac + + +# Determine processor architecture +if test $processor_arch = 'unknown'; then + processor_arch=`uname -m|tr [A-Z] [a-z]` +fi +case $processor_arch in + x86_64) + if test $bitsToBuild = 64; then + processor_arch='amd64' + else + processor_arch='x86' + fi; + ;; + x86|i686) + if test $bitsToBuild = 64; then + processor_arch='amd64' + else + processor_arch='x86' + fi + ;; + sparc64) + processor_arch='sparc' + ;; + amd64|x86|ia64|sparc) + ;; + *) + processor_arch=`uname -p|tr [A-Z] [a-z]` + case $processor_arch in + sparc|powerpc) + ;; + *) + echo "Warning: Unknown processor architecture: $processor_arch" + #exit ${ERROR_EXIT_CODE} + ;; + esac + 1;; +esac + +# Determine compiler flags +case $ccompiler in + gcc|clang) + case $processor_arch in + amd64) + case $OS_FAMILY in + linux|freebsd|darwin) + buildCXXflags="-m64 -fPIC -march=$ArchFlag $amd64_cxx_options" + buildCflags="-m64 -fPIC -march=$ArchFlag $amd64_cxx_options" + buildLDflags="-m64 -fPIC" + buildTestLDflags="-m64 -fPIC" + ;; + mingw) + buildCXXflags="-m64 -march=$ArchFlag $amd64_cxx_options" + buildCflags="-m64 -march=$ArchFlag $amd64_cxx_options" + buildLDflags="-m64" + buildTestLDflags="-m64" + ld_libs="" + ;; + *) + echo "Warning: cannot determine compiler flags for processor $processor_arch, OS $OS_FAMILY, and compiler $ccompiler" + #exit ${ERROR_EXIT_CODE} + ;; + esac + ;; + x86) + case $OS_FAMILY in + linux|freebsd|darwin) + buildCXXflags="-m32 -fPIC -march=$ArchFlag" + buildCflags="-m32 -fPIC -march=$ArchFlag" + buildLDflags="-m32 -fPIC" + buildTestLDflags="-m32 -fPIC" + ;; + mingw) + buildCXXflags="-m32 -march=$ArchFlag" + buildCflags="-m32 -march=$ArchFlag" + buildLDflags="-m32" + buildTestLDflags="-m32" + ld_libs="" + ;; + *) + echo "Warning: cannot determine compiler flags for processor $processor_arch, OS $OS_FAMILY, and compiler $ccompiler" + #exit ${ERROR_EXIT_CODE} + ;; + esac + ;; + ia64) + bitsToBuild=64 + case $OS_FAMILY in + linux|freebsd) + buildCXXflags="-mtune=itanium2 -fPIC" + buildCflags="-mtune=itanium2 -fPIC" + buildLDflags="-mtune=itanium2 -fPIC" + buildTestLDflags="-mtune=itanium2 -fPIC" + ;; + hpux) + buildCXXflags="-mlp64 -mtune=itanium2 -fPIC" + buildCflags="-mlp64 -mtune=itanium2 -fPIC" + buildLDflags="-mlp64 -mtune=itanium2 -fPIC" + buildTestLDflags="-mlp64 -mtune=itanium2 -fPIC" + ;; + *) + echo "Warning: cannot determine compiler flags for processor $processor_arch, OS $OS_FAMILY, and compiler $ccompiler" + #exit ${ERROR_EXIT_CODE} + ;; + esac + ;; + sparc) + bitsToBuild=64 + case $OS_FAMILY in + sunos) + buildCXXflags="-mcpu=v9 -mtune=ultrasparc3 -m64 -fPIC -pthreads" + buildCflags="-mcpu=v9 -mtune=ultrasparc3 -m64 -fPIC -pthreads" + buildLDflags="-mcpu=v9 -mtune=ultrasparc3 -m64 -fPIC -pthreads" + buildTestLDflags="-mcpu=v9 -mtune=ultrasparc3 -m64 -fPIC -pthreads" + cxx_test_release_options="-fPIC" + ld_test_release_options="-fPIC" + ;; + linux) + buildCXXflags="-mcpu=v9 -mtune=ultrasparc3 -m64 -fPIC" + buildCflags="-mcpu=v9 -mtune=ultrasparc3 -m64 -fPIC" + buildLDflags="-mcpu=v9 -mtune=ultrasparc3 -m64 -fPIC" + buildTestLDflags="-mcpu=v9 -mtune=ultrasparc3 -m64 -fPIC" + 
cxx_test_release_options="-fPIC" + ld_test_release_options="-fPIC" + ;; + *) + echo "Warning: cannot determine compiler flags for processor $processor_arch, OS $OS_FAMILY, and compiler $ccompiler" + #exit ${ERROR_EXIT_CODE} + ;; + esac + ;; + powerpc) + bitsToBuild=64 + case $OS_FAMILY in + aix) + buildCXXflags="-maix64 -pthread -fPIC" + buildCflags="-maix64 -pthread -fPIC" + buildLDflags="-maix64 -pthread -fPIC" + buildTestLDflags="-maix64 -pthread -fPIC" + cxx_test_release_options="-fPIC" + ld_test_release_options="-fPIC" + ;; + *) + echo "Warning: cannot determine compiler flags for processor $processor_arch, OS $OS_FAMILY, and compiler $ccompiler" + #exit ${ERROR_EXIT_CODE} + ;; + esac + ;; + *) + echo "Warning: cannot determine compiler flags for processor $processor_arch and compiler $ccompiler" + #exit ${ERROR_EXIT_CODE} + ;; + esac + + cppcompiler_version=`$cppcompiler -dumpversion` + echo g++ version=$gcc_version + + # Setup target options + # buildCXXflags="-std=gnu++0x $buildCXXflags" + cxx_debug_options="-D_DEBUG -O0 -g $cxx_debug_options" + cxx_release_options="-DNDEBUG $cxx_release_optimization $cxx_release_options" + ;; + *) + echo "ERROR: Unknown compiler: $ccompiler" + exit ${ERROR_EXIT_CODE} + ;; +esac + +if test $BOOST_INCLUDE_PATH != ''; then + buildCXXflags="$buildCXXflags -I$BOOST_INCLUDE_PATH" +fi + +if test 'x$buildTestLDflags' = 'x'; then + buildTestLDflags=$buildLDflags +fi + + +EXTRA_CXXFLAGS="$buildCXXflags $EXTRA_CXXFLAGS" +EXTRA_CFLAGS="$buildCflags $EXTRA_CFLAGS" +EXTRA_LDFLAGS="$buildLDflags $EXTRA_LDFLAGS" + +EXTRA_TEST_LDFLAGS="$buildTestLDflags $EXTRA_TEST_LDFLAGS" + + +echo "Building with the following options ..." +echo "Processor: $processor_arch" +echo "Platform: $OS_FAMILY" +echo "C Compiler: $ccompiler" +echo "C++ Compiler: $cppcompiler" +echo "C++ Compiler version: $cppcompiler_version" +echo "Bits to build: $bitsToBuild" +echo "Compile options: $compileroptions $EXTRA_CXXFLAGS" +echo "Link options: $linkeroptions $EXTRA_LDFLAGS" +echo "Link options (for test cds-unit app): $linkeroptions $EXTRA_TEST_LDFLAGS" + +BITSTOBUILD=$bitsToBuild +export BITSTOBUILD + +# +# Set the C compiler and C++ compiler environment variables +# + +CC="$ccompiler" +export CC + +CXX="$cppcompiler" +export CXX + +ROOT_DIR=.. + +GOAL_DIR=$ccompiler$TOOLSET_SUFFIX-$processor_arch-$OS_FAMILY$OS_VERSION-$bitsToBuild +BIN_PATH=$ROOT_DIR/bin/$GOAL_DIR +mkdir -p $BIN_PATH + +OBJ_PATH=$ROOT_DIR/obj/$GOAL_DIR +mkdir -p $OBJ_PATH + +echo PATH=$PATH +echo LD_LIBRARY_PATH=$LD_LIBRARY_PATH +echo BIN_PATH=$BIN_PATH +echo OBJ_PATH=$OBJ_PATH +echo `${CXX} --version | head -1` +echo Build started + +makegoals= +if test $makeclean = 'on'; then + echo Clean all + $MAKE -f Makefile clean platform=$OS_FAMILY BIN_PATH=$BIN_PATH OBJ_PATH=$OBJ_PATH +fi + +echo --------------------------------- +echo Make debug library +CXXFLAGS="$compileroptions $cxx_debug_options $EXTRA_CXXFLAGS" +export CXXFLAGS +CFLAGS="$compileroptions $cxx_debug_options $EXTRA_CFLAGS $debugflag " +export CFLAGS +LDFLAGS="$linkeroptions -shared $ld_debug_options $ld_libs $EXTRA_LDFLAGS " +export LDFLAGS + +mkdir -p $OBJ_PATH/debug + +$MAKE -f Makefile \ + platform=$OS_FAMILY \ + BIN_PATH=$BIN_PATH \ + OBJ_PATH=$OBJ_PATH/debug \ + debug + +if test $? -gt 0; then + exit $? 
+fi + +echo --------------------------------- +echo Make release library + +CXXFLAGS="$compileroptions $cxx_release_options $EXTRA_CXXFLAGS " +export CXXFLAGS +CFLAGS="$compileroptions $cxx_release_options $EXTRA_CFLAGS " +export CFLAGS +LDFLAGS="$linkeroptions -shared $ld_resease_options $ld_libs $EXTRA_LDFLAGS " +export LDFLAGS + +mkdir -p $OBJ_PATH/release + +$MAKE -f Makefile \ + platform=$OS_FAMILY \ + BIN_PATH=$BIN_PATH \ + OBJ_PATH=$OBJ_PATH/release \ + release + +if test $? -gt 0; then + exit $? +fi + + +echo --------------------------------- +echo Make tests + +if test $MAKE_DEBUG_TEST = '0'; then + CXXFLAGS="$compileroptions $cxx_release_options $cxx_test_release_options $EXTRA_CXXFLAGS " + export CXXFLAGS + CFLAGS="$compileroptions $cxx_release_options $EXTRA_CFLAGS " + export CFLAGS + LDFLAGS="$linkeroptions $ld_release_options $ld_test_release_options $ld_libs $EXTRA_TEST_LDFLAGS " + export LDFLAGS + + $MAKE -f Makefile -j $makejobs \ + platform=$OS_FAMILY \ + BIN_PATH=$BIN_PATH \ + OBJ_PATH=$OBJ_PATH/test \ + $target + + if test $? -gt 0; then + exit $? + fi +fi + +echo --------------------------------- +echo Make tests debug + +if test $MAKE_DEBUG_TEST = '1'; then + CXXFLAGS="$compileroptions $cxx_debug_options $cxx_test_release_options $EXTRA_CXXFLAGS " + export CXXFLAGS + CFLAGS="$compileroptions $cxx_debug_options $EXTRA_CFLAGS " + export CFLAGS + LDFLAGS="$linkeroptions $ld_debug_options $ld_test_release_options $ld_libs $EXTRA_TEST_LDFLAGS " + export LDFLAGS + + $MAKE -f Makefile -j $makejobs \ + platform=$OS_FAMILY \ + BIN_PATH=$BIN_PATH \ + OBJ_PATH=$OBJ_PATH/test-debug \ + $target + + if test $? -gt 0; then + exit $? + fi +fi + \ No newline at end of file diff --git a/build/sample/build-freebsd-amd64.sh b/build/sample/build-freebsd-amd64.sh new file mode 100644 index 00000000..4fd2c7ee --- /dev/null +++ b/build/sample/build-freebsd-amd64.sh @@ -0,0 +1,13 @@ +#!/bin/sh + +PATH=/usr/local/bin:$PATH +export PATH + +./build.sh \ +--clean \ +--with-make gmake \ +--with-boost /usr/local/include \ +-x g++43 \ +-z '-std=c++0x -Wall -pedantic' \ +-l "-L/usr/local/lib" \ +2>&1 | tee build-freebsd-amd64.log diff --git a/build/sample/build-hpux1123.sh b/build/sample/build-hpux1123.sh new file mode 100644 index 00000000..dde0165c --- /dev/null +++ b/build/sample/build-hpux1123.sh @@ -0,0 +1,19 @@ +#!/bin/sh +clear + +export BOOST_ROOT=~/boost + +GCC_ROOT=/usr/local/gcc-433 +export PATH=${GCC_ROOT}/bin:${PATH} +export LD_LIBRARY_PATH=${GCC_ROOT}/lib:${LD_LIBRARY_PATH} + +./build.sh -b 64 \ +--clean \ +-x 'g++' \ +-z '-std=c++0x -Wall -pedantic' \ +-l '-L${BOOST_ROOT}/bin/hpux1123-ia64' \ +--with-boost ${BOOST_ROOT} \ +--with-make gmake \ +--platform-suffix 11.23 \ +2>&1 | tee build-hpux11.23-ia64.log + diff --git a/build/sample/build-hpux1131.sh b/build/sample/build-hpux1131.sh new file mode 100644 index 00000000..386387a5 --- /dev/null +++ b/build/sample/build-hpux1131.sh @@ -0,0 +1,19 @@ +#!/bin/sh +clear + +export BOOST_ROOT=~/boost + +GCC_ROOT=/usr/local/gcc-433 +export PATH=${GCC_ROOT}/bin:${PATH} +export LD_LIBRARY_PATH=${GCC_ROOT}/lib:${LD_LIBRARY_PATH} + +./build.sh -b 64 \ +--clean \ +-x 'g++' \ +-z '-std=c++0x -Wall -pedantic' \ +-l '-L${BOOST_ROOT}/bin/hpux1131-ia64' \ +--with-boost ${BOOST_ROOT} \ +--with-make gmake \ +--platform-suffix 11.31 \ +2>&1 | tee build-hpux11.31-ia64.log + diff --git a/build/sample/build-linux-amd64.sh b/build/sample/build-linux-amd64.sh new file mode 100644 index 00000000..3d832469 --- /dev/null +++ b/build/sample/build-linux-amd64.sh @@ 
-0,0 +1,18 @@ +#!/bin/sh +clear + +export BOOST_ROOT=~/boost + +GCC_ROOT=/usr/local/gcc-4.3.3 +export PATH=${GCC_ROOT}/bin:${PATH} +export LD_LIBRARY_PATH=${GCC_ROOT}/lib:${LD_LIBRARY_PATH} + +./build.sh -b 64 \ +--clean \ +-x 'g++' \ +-z '-std=c++0x -Wall -pedantic' \ +-l '-L${BOOST_ROOT}/bin/linux-amd64' \ +--with-boost ${BOOST_ROOT} \ +--amd64-use-128bit \ +2>&1 | tee build-linux-amd64.log + diff --git a/build/sample/build-linux-ia64.sh b/build/sample/build-linux-ia64.sh new file mode 100644 index 00000000..7ca441ec --- /dev/null +++ b/build/sample/build-linux-ia64.sh @@ -0,0 +1,17 @@ +#!/bin/sh +clear + +export BOOST_ROOT=~/boost + +GCC_ROOT=/usr/local/gcc-4.3.3 +export PATH=${GCC_ROOT}/bin:${PATH} +export LD_LIBRARY_PATH=${GCC_ROOT}/lib:${LD_LIBRARY_PATH} + +./build.sh -b 64 \ +--clean \ +-x 'g++' \ +-z '-std=c++0x -Wall -pedantic' \ +-l '-L${BOOST_ROOT}/bin/linux-ia64' \ +--with-boost ${BOOST_ROOT} \ +2>&1 | tee build-linux-ia64.log + diff --git a/build/sample/build-linux-sparc.sh b/build/sample/build-linux-sparc.sh new file mode 100644 index 00000000..084b675d --- /dev/null +++ b/build/sample/build-linux-sparc.sh @@ -0,0 +1,14 @@ +#!/bin/sh +clear + +BOOST_ROOT=~/boost/boost_1_49_0 +export BOOST_ROOT + +./build.sh -b 64 -j 2 \ +-x 'g++' \ +-z '-std=c++0x -Wall -pedantic' \ +-l '-L/opt/cfarm/release/4.4.1-64/lib64 -L${BOOST_ROOT}/stage64/lib' \ +--with-boost ${BOOST_ROOT} \ +2>&1 | tee build-linux-sparc.log + + diff --git a/build/sample/build-linux-x86.sh b/build/sample/build-linux-x86.sh new file mode 100644 index 00000000..a2e638e6 --- /dev/null +++ b/build/sample/build-linux-x86.sh @@ -0,0 +1,17 @@ +#!/bin/sh +clear + +export BOOST_ROOT=~/boost + +GCC_ROOT=/usr/local/gcc-4.3.3 +export PATH=${GCC_ROOT}/bin:${PATH} +export LD_LIBRARY_PATH=${GCC_ROOT}/lib:${LD_LIBRARY_PATH} + +./build.sh -b 32 \ +--clean \ +-x 'g++' \ +-z '-std=c++0x -Wall -pedantic' \ +-l '-L${BOOST_ROOT}/bin/linux-x86' \ +--with-boost ${BOOST_ROOT} \ +2>&1 | tee build-linux-x86.log + diff --git a/build/sample/build-mingw-amd64.bat b/build/sample/build-mingw-amd64.bat new file mode 100644 index 00000000..62f393cd --- /dev/null +++ b/build/sample/build-mingw-amd64.bat @@ -0,0 +1,14 @@ + +:: This sample demonstrates how to build libcds in MinGW (clearly, with TDM-GCC). +:: Run TDM-GCC console, change dir to libcds build directory and run this batch file +:: +:: BOOST_PATH - environment variable that contains full path to boost root directory +:: for example: set BOOST_PATH=d:\libs\boost_1_49_0 +:: + +set BOOST_PATH=d:/Language/Lib/C/boost/boost_1_49_0 + +:: We use -DBOOST_THREAD_USE_LIB flag since static libboost_thread is used + +build.sh -b 64 -x 'g++' -z '-std=c++0x -Wall -pedantic -DBOOST_THREAD_USE_LIB -D_WIN32_WINNT=0x0500' -l "-L%BOOST_PATH%/stage-mingw64/lib" --with-boost "%BOOST_PATH%" --with-make mingw32-make 2>&1 | tee build-mingw-amd64.log + diff --git a/build/sample/build-osx-clang-libc++.sh b/build/sample/build-osx-clang-libc++.sh new file mode 100644 index 00000000..8c2aad29 --- /dev/null +++ b/build/sample/build-osx-clang-libc++.sh @@ -0,0 +1,16 @@ +#! 
/bin/sh + +BOOST_ROOT=~/works/libs/boost_1_55_0 +./build.sh \ + --clean \ + -c clang \ + -x clang \ + -b 64 \ + -z "--std=c++11 -DCDS_USE_LIBCDS_ATOMIC -I/usr/lib/c++/v1 -I$BOOST_ROOT" \ + -l "-L$BOOST_ROOT/stage64-clang/lib -lc++ -lc++abi -lpthread -ldl" \ + --nodefaultlibs \ + --with-boost $BOOST_ROOT \ + --arch-tune core2 \ + 2>&1 |tee build.log + +# -z "--std=c++11 -DCDS_USE_LIBCDS_ATOMIC -I/usr/lib/c++/v1 -I/usr/include -I$BOOST_ROOT" diff --git a/build/sample/build-osx-gcc.sh b/build/sample/build-osx-gcc.sh new file mode 100644 index 00000000..ee0d73d5 --- /dev/null +++ b/build/sample/build-osx-gcc.sh @@ -0,0 +1,16 @@ +#! /bin/sh + +BOOST_ROOT=~/works/libs/boost_1_55_0 +MACOS_SDK_INCLUDE=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/usr/include + +./build.sh \ + --clean \ + -c gcc \ + -x gcc-4.8 \ + -b 64 \ + -z "--std=c++11 -I$BOOST_ROOT -I$MACOS_SDK_INCLUDE" \ + -l "-L$BOOST_ROOT/stage64/lib" \ + --with-boost $BOOST_ROOT \ + --arch-tune core2 \ + 2>&1 |tee build.log + diff --git a/build/sample/build-sun-sparc.sh b/build/sample/build-sun-sparc.sh new file mode 100644 index 00000000..7fa3f455 --- /dev/null +++ b/build/sample/build-sun-sparc.sh @@ -0,0 +1,21 @@ +#!/bin/sh +clear + +BOOST_ROOT=~/boost +export BOOST_ROOT + +GCC_ROOT=/usr/local/gcc433 +PATH=${GCC_ROOT}/bin:${PATH} +export PATH +LD_LIBRARY_PATH=${GCC_ROOT}/lib:/usr/local/lib/mpfr-2.4.1/lib:/usr/local/lib/gmp-4.2.4/lib:${LD_LIBRARY_PATH} +export LD_LIBRARY_PATH + +./build.sh -b 64 \ +--clean \ +-x 'g++' \ +-z '-std=c++0x -Wall -pedantic' \ +-l '-L${BOOST_ROOT}/bin/sun-sparc64' \ +--with-boost ${BOOST_ROOT} \ +2>&1 | tee build-sun-sparc.log + + diff --git a/cds/algo/base.h b/cds/algo/base.h new file mode 100644 index 00000000..d07ffcb6 --- /dev/null +++ b/cds/algo/base.h @@ -0,0 +1,15 @@ +//$$CDS-header$$ + +#ifndef __CDS_ALGO_BASE_H +#define __CDS_ALGO_BASE_H + +#include + +namespace cds { + + /// Different approaches and techniques for supporting high-concurrent data structure + namespace algo {} + +} // namespace cds + +#endif // #ifndef __CDS_ALGO_BASE_H diff --git a/cds/algo/elimination.h b/cds/algo/elimination.h new file mode 100644 index 00000000..52595d90 --- /dev/null +++ b/cds/algo/elimination.h @@ -0,0 +1,58 @@ +//$$CDS-header$$ + +#ifndef __CDS_ALGO_ELIMINATION_H +#define __CDS_ALGO_ELIMINATION_H + +#include +#include +#include +#include + +namespace cds { namespace algo { + + /// Elimination technique + /** @anchor cds_elimination_description + Elimination technique allows highly distributed coupling and execution of operations with reverse + semantics like the pushes and pops on a stack. If a push followed by a pop are performed + on a stack, the data structure's state does not change (similarly for a pop followed by a push). + This means that if one can cause pairs of pushes and pops to meet and pair up in + separate locations, the threads can exchange values without having to touch a centralized structure + since they have anyhow "eliminated" each other's effect on it. Elimination can be implemented + by using a collision array in which threads pick random locations in order to try and collide. + Pairs of threads that "collide" in some location run through a synchronization protocol, + and all such disjoint collisions can be performed in parallel. If a thread has not met another + in the selected location or if it met a thread with an operation that cannot be eliminated + (such as two push operations), an alternative scheme must be used. 
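+
+        A deliberately simplified, illustrative sketch of one cell of such a collision array is shown
+        below; it is not the libcds implementation, and the type \p collision_slot and its members are
+        hypothetical names:
+        \code
+        #include <atomic>
+
+        // A pusher offers its value in the slot and waits briefly; a popper that arrives
+        // in time takes the value, and both operations are eliminated without touching
+        // the central structure.
+        struct collision_slot {
+            std::atomic<int *> pValue { nullptr };
+
+            bool try_push( int * pVal )
+            {
+                int * pExpected = nullptr;
+                if ( !pValue.compare_exchange_strong( pExpected, pVal ))
+                    return false;       // slot busy - fall back to the main data structure
+                // ... wait a bounded time for a partner ...
+                pExpected = pVal;
+                if ( pValue.compare_exchange_strong( pExpected, nullptr ))
+                    return false;       // nobody collided - withdraw the offer and fall back
+                return true;            // a popper took the value: the push is eliminated
+            }
+
+            int * try_pop()
+            {
+                // nullptr means there is no pending push to eliminate
+                return pValue.exchange( nullptr );
+            }
+        };
+        \endcode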
+ */ + namespace elimination { + + /// Base class describing an operation for eliminating + /** + This class contains some debugng info. + Actual operation descriptor depends on real container and its interface. + */ + struct operation_desc + { + record * pOwner; ///< Owner of the descriptor + }; + + /// Acquires elimination record for the current thread + template + static inline record * init_record( OperationDesc& op ) + { + record& rec = cds::threading::elimination_record(); + assert( rec.is_free()); + op.pOwner = &rec; + rec.pOp = static_cast( &op ); + return &rec; + } + + /// Releases elimination record for the current thread + static inline void clear_record() + { + cds::threading::elimination_record().pOp = null_ptr(); + } + } // namespace elimination +}} // namespace cds::algo + +#endif // __CDS_ALGO_ELIMINATION_H diff --git a/cds/algo/elimination_opt.h b/cds/algo/elimination_opt.h new file mode 100644 index 00000000..bca3bcce --- /dev/null +++ b/cds/algo/elimination_opt.h @@ -0,0 +1,37 @@ +//$$CDS-header$$ + +#ifndef __CDS_ALGO_ELIMINATION_OPT_H +#define __CDS_ALGO_ELIMINATION_OPT_H + +#include + +namespace cds { namespace opt { + + /// Enable \ref cds_elimination_description "elimination back-off" for the container + template + struct enable_elimination { + //@cond + template struct pack: public Base + { + static CDS_CONSTEXPR_CONST bool enable_elimination = Enable; + }; + //@endcond + }; + + /// \ref cds_elimination_description "Elimination back-off strategy" option setter + /** + Back-off strategy for elimination. + Usually, elimination back-off strategy is \p cds::backoff::delay. + */ + template + struct elimination_backoff { + //@cond + template struct pack: public Base + { + typedef Type elimination_backoff; + }; + //@endcond + }; +}} // namespace cds::opt + +#endif // #ifndef __CDS_ALGO_ELIMINATION_OPT_H diff --git a/cds/algo/elimination_tls.h b/cds/algo/elimination_tls.h new file mode 100644 index 00000000..2b0091f4 --- /dev/null +++ b/cds/algo/elimination_tls.h @@ -0,0 +1,34 @@ +//$$CDS-header$$ + +#ifndef __CDS_ALGO_ELIMINATION_TLS_H +#define __CDS_ALGO_ELIMINATION_TLS_H + +#include + +namespace cds { namespace algo { namespace elimination { + + // Forwards + struct operation_desc; + + /// Per-thread elimination record + /** @headerfile cds/algo/elimination.h + */ + struct record + { + operation_desc * pOp ; ///< Operation descriptor + + /// Initialization + record() + : pOp( null_ptr() ) + {} + + /// Checks if the record is free + bool is_free() const + { + return pOp == null_ptr(); + } + }; + +}}} // cds::algo::elimination + +#endif // #ifndef __CDS_ALGO_ELIMINATION_TLS_H diff --git a/cds/algo/flat_combining.h b/cds/algo/flat_combining.h new file mode 100644 index 00000000..30a8961a --- /dev/null +++ b/cds/algo/flat_combining.h @@ -0,0 +1,808 @@ +//$$CDS-header$$ + +#ifndef __CDS_ALGO_FLAT_COMBINING_H +#define __CDS_ALGO_FLAT_COMBINING_H + +#include +#include +#include +#include +#include // lock_guard +#include +#include +#include // thread_specific_ptr + +namespace cds { namespace algo { + + /// @defgroup cds_flat_combining_intrusive Intrusive flat combining containers + /// @defgroup cds_flat_combining_container Non-intrusive flat combining containers + + /// Flat combining + /** + @anchor cds_flat_combining_description + Flat combining (FC) technique is invented by Hendler, Incze, Shavit and Tzafrir in their paper + [2010] "Flat Combining and the Synchronization-Parallelism Tradeoff". 
+ The technique converts a sequential data structure to its concurrent implementation. + A few structures are added to the sequential implementation: a global lock, + a count of the number of combining passes, and a pointer to the head + of a publication list. The publication list is a list of thread-local records + of a size proportional to the number of threads that are concurrently accessing the shared object. + + Each thread \p t accessing the structure to perform an invocation of some method \p m + on the shared object executes the following sequence of steps: +
+
+        1. Write the invocation opcode and parameters (if any) of the method \p m to be applied
+           sequentially to the shared object in the request field of your thread local publication
+           record (there is no need to use a load-store memory barrier). The request field will later
+           be used to receive the response. If your thread local publication record is marked as active
+           continue to step 2, otherwise continue to step 5.
+        2. Check if the global lock is taken. If so (another thread is an active combiner), spin on the request
+           field waiting for a response to the invocation (one can add a yield at this point to allow other threads
+           on the same core to run). Once in a while while spinning, check if the lock is still taken and that your
+           record is active. If your record is inactive proceed to step 5. Once the response is available,
+           reset the request field to null and return the response.
+        3. If the lock is not taken, attempt to acquire it and become a combiner. If you fail,
+           return to spinning in step 2.
+        4. Otherwise, you hold the lock and are a combiner.
+           - Increment the combining pass count by one.
+           - Execute a \p fc_apply() by traversing the publication list from the head,
+             combining all non-null method call invocations, setting the age of each of these records
+             to the current count, applying the combined method calls to the structure D, and returning
+             responses to all the invocations. This traversal is guaranteed to be wait-free.
+           - If the count is such that a cleanup needs to be performed, traverse the publication
+             list from the head. Starting from the second item (we always leave the item pointed to
+             by the head in the list), remove from the publication list all records whose age is
+             much smaller than the current count. This is done by removing the node and marking it
+             as inactive.
+           - Release the lock.
+        5. If you have no thread local publication record allocate one, marked as active. If you already
+           have one marked as inactive, mark it as active. Execute a store-load memory barrier. Proceed to insert
+           the record into the list with a successful CAS to the head. Then proceed to step 1.
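+
+        To make the protocol above concrete, below is a minimal, purely illustrative sketch of a
+        container built on the \p %kernel class declared in this header; the container \p fc_counter,
+        its operation code \p op_inc and the field \p nResult are hypothetical names, not part of libcds:
+        \code
+        #include <cds/algo/flat_combining.h>
+
+        // Each thread publishes its request in a record derived from publication_record
+        struct inc_record: cds::algo::flat_combining::publication_record {
+            int nResult;    // value handed back to the calling thread
+        };
+
+        class fc_counter {
+            enum { op_inc = cds::algo::flat_combining::req_Operation };
+
+            cds::algo::flat_combining::kernel< inc_record >  m_FC;
+            int  m_nCounter;    // sequential data protected by flat combining
+        public:
+            fc_counter(): m_nCounter(0) {}
+
+            int increment()
+            {
+                inc_record * pRec = m_FC.acquire_record();  // per-thread publication record
+                m_FC.combine( op_inc, pRec, *this );        // the thread may become the combiner
+                int nRes = pRec->nResult;                   // response written by the combiner
+                m_FC.release_record( pRec );
+                return nRes;
+            }
+
+            // Called by the kernel, from the combiner thread, for each active non-empty record
+            void fc_apply( inc_record * pRec )
+            {
+                if ( pRec->op() == op_inc )
+                    pRec->nResult = ++m_nCounter;
+            }
+        };
+        \endcode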
+ + As the test results show, the flat combining technique is suitable for non-intrusive containers + like stack, queue, deque. For intrusive concurrent containers the flat combining demonstrates + less impressive results. + + \ref cds_flat_combining_container "List of FC-based containers" in libcds. + + \ref cds_flat_combining_intrusive "List of intrusive FC-based containers" in libcds. + */ + namespace flat_combining { + + /// Special values of publication_record::nRequest + enum request_value + { + req_EmptyRecord, ///< Publication record is empty + req_Response, ///< Operation is done + + req_Operation ///< First operation id for derived classes + }; + + /// publication_record state + enum record_state { + inactive, ///< Record is inactive + active, ///< Record is active + removed ///< Record should be removed + }; + + /// Record of publication list + /** + Each data structure based on flat combining contains a class derived from \p %publication_record + */ + struct publication_record { + CDS_ATOMIC::atomic nRequest; ///< Request field (depends on data structure) + CDS_ATOMIC::atomic nState; ///< Record state: inactive, active, removed + unsigned int nAge; ///< Age of the record + CDS_ATOMIC::atomic pNext; ///< Next record in publication list + void * pOwner; ///< [internal data] Pointer to \ref kernel object that manages the publication list + + /// Initializes publication record + publication_record() + : nRequest( req_EmptyRecord ) + , nState( inactive ) + , nAge(0) + , pNext( null_ptr() ) + , pOwner( null_ptr() ) + {} + + /// Returns the value of \p nRequest field + unsigned int op() const + { + return nRequest.load( CDS_ATOMIC::memory_order_relaxed ); + } + + /// Checks if the operation is done + bool is_done() const + { + return nRequest.load( CDS_ATOMIC::memory_order_relaxed ) == req_Response; + } + }; + + /// Flat combining internal statistics + template + struct stat + { + typedef Counter counter_type; ///< Event counter type + + counter_type m_nOperationCount ; ///< How many operations have been performed + counter_type m_nCombiningCount ; ///< Combining call count + counter_type m_nCompactPublicationList; ///< Count of publication list compacting + counter_type m_nDeactivatePubRecord; ///< How many publication records were deactivated during compacting + counter_type m_nActivatePubRecord; ///< Count of publication record activating + counter_type m_nPubRecordCreated ; ///< Count of created publication records + counter_type m_nPubRecordDeteted ; ///< Count of deleted publication records + counter_type m_nAcquirePubRecCount; ///< Count of acquiring publication record + counter_type m_nReleasePubRecCount; ///< Count on releasing publication record + + /// Returns current combining factor + /** + Combining factor is how many operations perform in one combine pass: + combining_factor := m_nOperationCount / m_nCombiningCount + */ + double combining_factor() const + { + return m_nCombiningCount.get() ? 
double( m_nOperationCount.get()) / m_nCombiningCount.get() : 0.0; + } + + //@cond + void onOperation() { ++m_nOperationCount; } + void onCombining() { ++m_nCombiningCount; } + void onCompactPublicationList() { ++m_nCompactPublicationList; } + void onDeactivatePubRecord() { ++m_nDeactivatePubRecord; } + void onActivatPubRecord() { ++m_nActivatePubRecord; } + void onCreatePubRecord() { ++m_nPubRecordCreated; } + void onDeletePubRecord() { ++m_nPubRecordDeteted; } + void onAcquirePubRecord() { ++m_nAcquirePubRecCount; } + void onReleasePubRecord() { ++m_nReleasePubRecCount; } + //@endcond + }; + + /// Flat combining dummy internal statistics + struct empty_stat + { + //@cond + void onOperation() {} + void onCombining() {} + void onCompactPublicationList() {} + void onDeactivatePubRecord() {} + void onActivatPubRecord() {} + void onCreatePubRecord() {} + void onDeletePubRecord() {} + void onAcquirePubRecord() {} + void onReleasePubRecord() {} + //@endcond + }; + + /// Type traits of \ref kernel class + /** + You can define different type traits for \ref kernel + by specifying your struct based on \p %type_traits + or by using \ref make_traits metafunction. + */ + struct type_traits + { + typedef cds::lock::Spin lock_type; ///< Lock type + typedef cds::backoff::delay_of<2> back_off; ///< Back-off strategy + typedef CDS_DEFAULT_ALLOCATOR allocator; ///< Allocator used for TLS data (allocating publication_record derivatives) + typedef empty_stat stat; ///< Internal statistics + typedef opt::v::relaxed_ordering memory_model; ///< /// C++ memory ordering model + }; + + /// Metafunction converting option list to traits + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + \p Options are: + - \p opt::lock_type - mutex type, default is \p cds::lock::Spin + - \p opt::back_off - back-off strategy, defalt is \p cds::backoff::delay_of<2> + - \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default) + - \p opt::memory_model - C++ memory ordering model. + List of all available memory ordering see opt::memory_model. + Default if cds::opt::v:relaxed_ordering + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS6 >::type + ,CDS_OPTIONS6 + >::type type; +# endif + }; + + /// The kernel of flat combining + /** + Template parameters: + - \p PublicationRecord - a type derived from \ref publication_record + - \p Traits - a type traits of flat combining, default is flat_combining::type_traits. + \ref make_traits metafunction can be used to create type traits + + The kernel object should be a member of a container class. The container cooperates with flat combining + kernel object. There are two ways to interact with the kernel: + - One-by-one processing the active records of the publication list. This mode provides \ref combine function: + the container acquires its publication record by \ref acquire_record, fills its fields and calls + \p combine function of its kernel object. If the current thread becomes a combiner, the kernel + calls \p fc_apply function of the container for each active non-empty record. Then, the container + should release its publication record by \ref release_record. Only one pass through the publication + list is possible. 
+ - Batch processing (\ref batch_combine function). It this mode the container obtains access + to entire publication list. This mode allows the container to perform an elimination, for example, + the stack can collide \p push and \p pop requests. The sequence of invocations is the following: + the container acquires its publication record by \ref acquire_record, fills its field and call + \p batch_combine function of its kernel object. If the current thread becomes a combiner, + the kernel calls \p fc_process function of the container passing two iterators pointing to + begin and end of publication list (see \ref iterator class). The iterators allows + multiple pass through active records of publication list. For each processed record the container + should call \ref operation_done function. On the end, the container should release + its record by \ref release_record. + */ + template < + typename PublicationRecord + ,typename Traits = type_traits + > + class kernel + { + public: + typedef PublicationRecord publication_record_type; ///< publication record type + typedef Traits type_traits; ///< Type traits + typedef typename type_traits::lock_type global_lock_type; ///< Global lock type + typedef typename type_traits::back_off back_off; ///< back-off strategy type + typedef typename type_traits::allocator allocator; ///< Allocator type (used for allocating publication_record_type data) + typedef typename type_traits::stat stat; ///< Internal statistics + typedef typename type_traits::memory_model memory_model; ///< C++ memory model + + protected: + //@cond + typedef cds::details::Allocator< publication_record_type, allocator > cxx11_allocator; ///< internal helper cds::details::Allocator + typedef cds_std::lock_guard lock_guard; + //@endcond + + protected: + unsigned int m_nCount; ///< Count of combining passes + publication_record_type * m_pHead; ///< Head of publication list + boost::thread_specific_ptr< publication_record_type > m_pThreadRec; ///< Thread-local publication record + mutable global_lock_type m_Mutex; ///< Global mutex + mutable stat m_Stat; ///< Internal statistics + unsigned int const m_nCompactFactor; ///< Publication list compacting factor (the list will be compacted through \p %m_nCompactFactor combining passes) + unsigned int const m_nCombinePassCount; ///< Number of combining passes + + public: + /// Initializes the object + /** + Compact factor = 64 + + Combiner pass count = 8 + */ + kernel() + : m_nCount(0) + , m_pHead( null_ptr< publication_record_type *>()) + , m_pThreadRec( tls_cleanup ) + , m_nCompactFactor( 64 - 1 ) // binary mask + , m_nCombinePassCount( 8 ) + { + init(); + } + + /// Initializes the object + kernel( + unsigned int nCompactFactor ///< Publication list compacting factor (the list will be compacted through \p nCompactFactor combining passes) + ,unsigned int nCombinePassCount ///< Number of combining passes for combiner thread + ) + : m_nCount(0) + , m_pHead( null_ptr< publication_record_type *>()) + , m_pThreadRec( tls_cleanup ) + , m_nCompactFactor( (unsigned int)( cds::beans::ceil2( nCompactFactor ) - 1 )) // binary mask + , m_nCombinePassCount( nCombinePassCount ) + { + init(); + } + + /// Destroys the objects and mark all publication records as inactive + ~kernel() + { + // mark all publication record as detached + for ( publication_record * p = m_pHead; p; p = p->pNext.load( memory_model::memory_order_relaxed )) + p->pOwner = null_ptr(); + } + + /// Gets publication list record for the current thread + /** + If there is no publication 
record for the current thread + the function allocates it. + */ + publication_record_type * acquire_record() + { + publication_record_type * pRec = m_pThreadRec.get(); + if ( !pRec ) { + // Allocate new publication record + pRec = cxx11_allocator().New(); + pRec->pOwner = reinterpret_cast( this ); + m_pThreadRec.reset( pRec ); + m_Stat.onCreatePubRecord(); + } + + if ( pRec->nState.load( memory_model::memory_order_acquire ) != active ) + publish( pRec ); + + assert( pRec->nRequest.load( memory_model::memory_order_relaxed ) == req_EmptyRecord ); + + m_Stat.onAcquirePubRecord(); + return pRec; + } + + /// Marks publication record for the current thread as empty + void release_record( publication_record_type * pRec ) + { + assert( pRec->is_done() ); + pRec->nRequest.store( req_EmptyRecord, memory_model::memory_order_relaxed ); + m_Stat.onReleasePubRecord(); + } + + /// Trying to execute operation \p nOpId + /** + \p pRec is the publication record acquiring by \ref acquire_record earlier. + \p owner is a container that is owner of flat combining kernel object. + As a result the current thread can become a combiner or can wait for + another combiner performs \p pRec operation. + + If the thread becomes a combiner, the kernel calls \p owner.fc_apply + for each active non-empty publication record. + */ + template + void combine( unsigned int nOpId, publication_record_type * pRec, Container& owner ) + { + assert( nOpId >= req_Operation ); + assert( pRec ); + //assert( pRec->nState.load( memory_model::memory_order_relaxed ) == active ); + pRec->nRequest.store( nOpId, memory_model::memory_order_release ); + + m_Stat.onOperation(); + + try_combining( owner, pRec ); + } + + /// Trying to execute operation \p nOpId in batch-combine mode + /** + \p pRec is the publication record acquiring by \ref acquire_record earlier. + \p owner is a container that owns flat combining kernel object. + As a result the current thread can become a combiner or can wait for + another combiner performs \p pRec operation. + + If the thread becomes a combiner, the kernel calls \p owner.fc_process + giving the container the full access over publication list. This function + is useful for an elimination technique if the container supports any kind of + that. The container can perform multiple pass through publication list. + + \p owner.fc_process has two arguments - forward iterators on begin and end of + publication list, see \ref iterator class. For each processed record the container + should call \ref operation_done function to mark the record as processed. + + On the end of \p %batch_combine the \ref combine function is called + to process rest of publication records. + */ + template + void batch_combine( unsigned int nOpId, publication_record_type * pRec, Container& owner ) + { + assert( nOpId >= req_Operation ); + assert( pRec ); + //assert( pRec->nState.load( memory_model::memory_order_relaxed ) == active ); + pRec->nRequest.store( nOpId, memory_model::memory_order_release ); + + m_Stat.onOperation(); + + try_batch_combining( owner, pRec ); + } + + /// Waits for end of combining + void wait_while_combining() const + { + lock_guard l( m_Mutex ); + } + + /// Marks \p rec as executed + /** + This function should be called by container if batch_combine mode is used. + For usual combining (see \ref combine) this function is excess. 
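+
+            A hypothetical sketch of the container side of \p batch_combine mode; the names
+            \p apply() and \p m_FC are illustrative and not part of the library interface:
+            \code
+            template <typename Iterator>
+            void fc_process( Iterator itBegin, Iterator itEnd )
+            {
+                for ( Iterator it = itBegin; it != itEnd; ++it ) {
+                    apply( &*it );              // container-specific handling of the record
+                    m_FC.operation_done( *it ); // mark the record as served
+                }
+            }
+            \endcode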
+ */ + void operation_done( publication_record& rec ) + { + rec.nRequest.store( req_Response, memory_model::memory_order_release ); + } + + /// Internal statistics + stat const& statistics() const + { + return m_Stat; + } + + //@cond + // For container classes based on flat combining + stat& internal_statistics() const + { + return m_Stat; + } + //@endcond + + /// Returns the compact factor + unsigned int compact_factor() const + { + return m_nCompactFactor + 1; + } + + /// Returns number of combining passes for combiner thread + unsigned int combine_pass_count() const + { + return m_nCombinePassCount; + } + + public: + /// Publication list iterator + /** + Iterators are intended for batch processing by container's + \p fc_process function. + The iterator allows iterate through active publication list. + */ + class iterator + { + //@cond + friend class kernel; + publication_record_type * m_pRec; + //@endcond + + protected: + //@cond + iterator( publication_record_type * pRec ) + : m_pRec( pRec ) + { + skip_inactive(); + } + + void skip_inactive() + { + while ( m_pRec && (m_pRec->nState.load( memory_model::memory_order_acquire ) != active + || m_pRec->nRequest.load( memory_model::memory_order_relaxed) < req_Operation )) + { + m_pRec = static_cast(m_pRec->pNext.load( memory_model::memory_order_acquire )); + } + } + //@endcond + + public: + /// Initializes an empty iterator object + iterator() + : m_pRec( null_ptr()) + {} + + /// Copy ctor + iterator( iterator const& src ) + : m_pRec( src.m_pRec ) + {} + + /// Pre-increment + iterator& operator++() + { + assert( m_pRec ); + m_pRec = static_cast( m_pRec->pNext.load( memory_model::memory_order_acquire )); + skip_inactive(); + return *this; + } + + /// Post-increment + iterator operator++(int) + { + assert( m_pRec ); + iterator it(*this); + ++(*this); + return it; + } + + /// Dereference operator, can return \p nullptr + publication_record_type * operator ->() + { + return m_pRec; + } + + /// Dereference operator, the iterator should not be an end iterator + publication_record_type& operator*() + { + assert( m_pRec ); + return *m_pRec; + } + + /// Iterator equality + friend bool operator==( iterator it1, iterator it2 ) + { + return it1.m_pRec == it2.m_pRec; + } + + /// Iterator inequality + friend bool operator!=( iterator it1, iterator it2 ) + { + return !( it1 == it2 ); + } + }; + + /// Returns an iterator to the first active publication record + iterator begin() { return iterator(m_pHead); } + + /// Returns an iterator to the end of publication list. Should not be dereferenced. 
+ iterator end() { return iterator(); } + + private: + //@cond + static void tls_cleanup( publication_record_type * pRec ) + { + // Thread done + // pRec that is TLS data should be excluded from publication list + if ( pRec ) { + if ( pRec->nState.load(memory_model::memory_order_relaxed) == active && pRec->pOwner ) { + // record is active and kernel is alive + unsigned int nState = active; + pRec->nState.compare_exchange_strong( nState, removed, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + } + else { + // record is not in publication list or kernel already deleted + cxx11_allocator().Delete( pRec ); + } + } + } + + void init() + { + assert( m_pThreadRec.get() == null_ptr() ); + publication_record_type * pRec = cxx11_allocator().New(); + m_pHead = pRec; + pRec->pOwner = this; + m_pThreadRec.reset( pRec ); + m_Stat.onCreatePubRecord(); + } + + void publish( publication_record_type * pRec ) + { + assert( pRec->nState.load( memory_model::memory_order_relaxed ) == inactive ); + + pRec->nAge = m_nCount; + pRec->nState.store( active, memory_model::memory_order_release ); + + // Insert record to publication list + if ( m_pHead != static_cast(pRec) ) { + publication_record * p = m_pHead->pNext.load(memory_model::memory_order_relaxed); + if ( p != static_cast( pRec )) { + do { + pRec->pNext = p; + // Failed CAS changes p + } while ( !m_pHead->pNext.compare_exchange_weak( p, static_cast(pRec), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + m_Stat.onActivatPubRecord(); + } + } + } + + void republish( publication_record_type * pRec ) + { + if ( pRec->nState.load( memory_model::memory_order_relaxed ) != active ) { + // The record has been excluded from publication list. Reinsert it + publish( pRec ); + } + } + + template + void try_combining( Container& owner, publication_record_type * pRec ) + { + if ( m_Mutex.try_lock() ) { + // The thread becomes a combiner + lock_guard l( m_Mutex, cds_std::adopt_lock_t() ); + + // The record pRec can be excluded from publication list. Re-publish it + republish( pRec ); + + combining( owner ); + assert( pRec->nRequest.load( memory_model::memory_order_relaxed ) == req_Response ); + } + else { + // There is another combiner, wait while it executes our request + if ( !wait_for_combining( pRec ) ) { + // The thread becomes a combiner + lock_guard l( m_Mutex, cds_std::adopt_lock_t() ); + + // The record pRec can be excluded from publication list. Re-publish it + republish( pRec ); + + combining( owner ); + assert( pRec->nRequest.load( memory_model::memory_order_relaxed ) == req_Response ); + } + } + } + + template + void try_batch_combining( Container& owner, publication_record_type * pRec ) + { + if ( m_Mutex.try_lock() ) { + // The thread becomes a combiner + lock_guard l( m_Mutex, cds_std::adopt_lock_t() ); + + // The record pRec can be excluded from publication list. Re-publish it + republish( pRec ); + + batch_combining( owner ); + assert( pRec->nRequest.load( memory_model::memory_order_relaxed ) == req_Response ); + } + else { + // There is another combiner, wait while it executes our request + if ( !wait_for_combining( pRec ) ) { + // The thread becomes a combiner + lock_guard l( m_Mutex, cds_std::adopt_lock_t() ); + + // The record pRec can be excluded from publication list. 
Re-publish it + republish( pRec ); + + batch_combining( owner ); + assert( pRec->nRequest.load( memory_model::memory_order_relaxed ) == req_Response ); + } + } + } + + template + void combining( Container& owner ) + { + // The thread is a combiner + assert( !m_Mutex.try_lock() ); + + unsigned int const nCurAge = ++m_nCount; + + for ( unsigned int nPass = 0; nPass < m_nCombinePassCount; ++nPass ) + if ( !combining_pass( owner, nCurAge )) + break; + + m_Stat.onCombining(); + if ( (nCurAge & m_nCompactFactor) == 0 ) + compact_list( nCurAge ); + } + + template + bool combining_pass( Container& owner, unsigned int nCurAge ) + { + publication_record * pPrev = null_ptr(); + publication_record * p = m_pHead; + bool bOpDone = false; + while ( p ) { + switch ( p->nState.load( memory_model::memory_order_acquire )) { + case active: + if ( p->op() >= req_Operation ) { + p->nAge = nCurAge; + owner.fc_apply( static_cast(p) ); + operation_done( *p ); + bOpDone = true; + } + break; + case inactive: + // Only m_pHead can be inactive in the publication list + assert( p == m_pHead ); + break; + case removed: + // The record should be removed + p = unlink_and_delete_record( pPrev, p ); + continue; + default: + /// ??? That is impossible + assert(false); + } + pPrev = p; + p = p->pNext.load( memory_model::memory_order_acquire ); + } + return bOpDone; + } + + template + void batch_combining( Container& owner ) + { + // The thread is a combiner + assert( !m_Mutex.try_lock() ); + + unsigned int const nCurAge = ++m_nCount; + + for ( unsigned int nPass = 0; nPass < m_nCombinePassCount; ++nPass ) + owner.fc_process( begin(), end() ); + + combining_pass( owner, nCurAge ); + m_Stat.onCombining(); + if ( (nCurAge & m_nCompactFactor) == 0 ) + compact_list( nCurAge ); + } + + bool wait_for_combining( publication_record_type * pRec ) + { + back_off bkoff; + while ( pRec->nRequest.load( memory_model::memory_order_acquire ) != req_Response ) { + + // The record can be excluded from publication list. 
Reinsert it + republish( pRec ); + + bkoff(); + + if ( m_Mutex.try_lock() ) { + if ( pRec->nRequest.load( memory_model::memory_order_acquire ) == req_Response ) { + m_Mutex.unlock(); + break; + } + // The thread becomes a combiner + return false; + } + } + return true; + } + + void compact_list( unsigned int const nCurAge ) + { + // Thinning publication list + publication_record * pPrev = null_ptr(); + for ( publication_record * p = m_pHead; p; ) { + if ( p->nState.load( memory_model::memory_order_acquire ) == active && p->nAge + m_nCompactFactor < nCurAge ) { + if ( pPrev ) { + publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire ); + if ( pPrev->pNext.compare_exchange_strong( p, pNext, + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + { + p->nState.store( inactive, memory_model::memory_order_release ); + p = pNext; + m_Stat.onDeactivatePubRecord(); + continue; + } + } + } + pPrev = p; + p = p->pNext.load( memory_model::memory_order_acquire ); + } + + m_Stat.onCompactPublicationList(); + } + + publication_record * unlink_and_delete_record( publication_record * pPrev, publication_record * p ) + { + if ( pPrev ) { + publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire ); + if ( pPrev->pNext.compare_exchange_strong( p, pNext, + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + { + cxx11_allocator().Delete( static_cast( p )); + m_Stat.onDeletePubRecord(); + } + return pNext; + } + else { + m_pHead = static_cast( p->pNext.load( memory_model::memory_order_acquire )); + cxx11_allocator().Delete( static_cast( p )); + m_Stat.onDeletePubRecord(); + return m_pHead; + } + } + //@endcond + }; + + //@cond + class container + { + public: + template + void fc_apply( PubRecord * ) + { + assert( false ); + } + + template + void fc_process( Iterator, Iterator ) + { + assert( false ); + } + }; + //@endcond + + } // namespace flat_combining +}} // namespace cds::algo + +#endif // #ifndef __CDS_ALGO_FLAT_COMBINING_H diff --git a/cds/backoff_strategy.h b/cds/backoff_strategy.h new file mode 100644 index 00000000..b36df538 --- /dev/null +++ b/cds/backoff_strategy.h @@ -0,0 +1,426 @@ +//$$CDS-header$$ + +#ifndef __CDS_BACKOFF_STRATEGY_H +#define __CDS_BACKOFF_STRATEGY_H + +/* + Filename: backoff_strategy.h + Created 2007.03.01 by Maxim Khiszinsky + + Description: + Generic back-off strategies + + Editions: + 2007.03.01 Maxim Khiszinsky Created + 2008.10.02 Maxim Khiszinsky Backoff action transfers from contructor to operator() for all backoff schemas + 2009.09.10 Maxim Khiszinsky reset() function added +*/ + +#include +#include +#include + +namespace cds { + /// Different backoff schemes + /** + Back-off schema may be used in lock-free algorithms when the algorithm cannot perform some action because a conflict + with the other concurrent operation is encountered. In this case current thread can do another work or can call + processor's performance hint. + + The interface of back-off strategy is following: + \code + struct backoff_strategy { + void operator()(); + template bool operator()( Predicate pr ); + void reset(); + }; + \endcode + + \p operator() operator calls back-off strategy's action. It is main part of back-off strategy. + + Interruptible back-off template < typename Predicate > bool operator()( Predicate pr ) + allows to interrupt back-off spinning if \p pr predicate returns \p true. 
+ \p Predicate is a functor with the following interface: + \code + struct predicate { + bool operator()(); + }; + \endcode + + \p reset() function resets internal state of back-off strategy to initial state. It is required for some + back-off strategies, for example, exponential back-off. + */ + namespace backoff { + + /// Empty backoff strategy. Do nothing + struct empty { + //@cond + void operator ()() + {} + + template + bool operator()( Predicate pr ) + { + return pr(); + } + + void reset() + {} + //@endcond + }; + + /// Switch to another thread (yield). Good for thread preemption architecture. + struct yield { + //@cond + void operator ()() + { + cds_std::this_thread::yield(); + //OS::yield(); + } + + template + bool operator()( Predicate pr ) + { + if ( pr() ) + return true; + operator()(); + return false; + } + + void reset() + {} + //@endcond + }; + + /// Random pause + /** + This back-off strategy calls processor-specific pause hint instruction + if one is available for the processor architecture. + */ + struct pause { + //@cond + void operator ()() + { +# ifdef CDS_backoff_pause_defined + platform::backoff_pause(); +# endif + } + + template + bool operator()( Predicate pr ) + { + if ( pr() ) + return true; + operator()(); + return false; + } + + void reset() + {} + //@endcond + }; + + /// Processor hint back-off + /** + This back-off schema calls performance hint instruction if it is available for current processor. + Otherwise, it calls \p nop. + */ + struct hint + { + //@cond + void operator ()() + { +# if defined(CDS_backoff_hint_defined) + platform::backoff_hint(); +# elif defined(CDS_backoff_nop_defined) + platform::backoff_nop(); +# endif + } + + template + bool operator()( Predicate pr ) + { + if ( pr() ) + return true; + operator()(); + return false; + } + + void reset() + {} + //@endcond + }; + + /// Exponential back-off + /** + This back-off strategy is composite. It consists of \p SpinBkoff and \p YieldBkoff + back-off strategy. In first, the strategy tries to apply repeatedly \p SpinBkoff + (spinning phase) until internal counter of failed attempts reaches its maximum + spinning value. Then, the strategy transits to high-contention phase + where it applies \p YieldBkoff until \p reset() is called. + On each spinning iteration the internal spinning counter is doubled. + + Choosing the best value for maximum spinning bound is platform and task specific. + In this implementation, the default values for maximum and minimum spinning is statically + declared so you can set its value globally for your platform. + The third template argument, \p Tag, is used to separate implementation. 
For + example, you may define two \p exponential back-offs that is the best for your task A and B: + \code + + #include + namespace bkoff = cds::backoff; + + struct tagA ; // tag to select task A implementation + struct tagB ; // tag to select task B implementation + + // // define your back-off specialization + typedef bkoff::exponential expBackOffA; + typedef bkoff::exponential expBackOffB; + + // // set up the best bounds for task A + expBackOffA::s_nExpMin = 32; + expBackOffA::s_nExpMax = 1024; + + // // set up the best bounds for task B + expBackOffB::s_nExpMin = 2; + expBackOffB::s_nExpMax = 512; + + \endcode + + Another way of solving this problem is subclassing \p exponential back-off class: + \code + #include + namespace bkoff = cds::backoff; + typedef bkoff::exponential base_bkoff; + + class expBackOffA: public base_bkoff + { + public: + expBackOffA() + : base_bkoff( 32, 1024 ) + {} + }; + + class expBackOffB: public base_bkoff + { + public: + expBackOffB() + : base_bkoff( 2, 512 ) + {} + }; + \endcode + */ + template + class exponential + { + public: + typedef SpinBkoff spin_backoff ; ///< spin back-off strategy + typedef YieldBkoff yield_backoff ; ///< yield back-off strategy + typedef Tag impl_tag ; ///< implementation separation tag + + static size_t s_nExpMin ; ///< Default minimum spinning bound (16) + static size_t s_nExpMax ; ///< Default maximum spinning bound (16384) + + protected: + size_t m_nExpCur ; ///< Current spinning + size_t m_nExpMin ; ///< Minimum spinning bound + size_t m_nExpMax ; ///< Maximum spinning bound + + spin_backoff m_bkSpin ; ///< Spinning (fast-path) phase back-off strategy + yield_backoff m_bkYield ; ///< Yield phase back-off strategy + + public: + /// Initializes m_nExpMin and m_nExpMax from default s_nExpMin and s_nExpMax respectively + exponential() + : m_nExpMin( s_nExpMin ) + , m_nExpMax( s_nExpMax ) + { + m_nExpCur = m_nExpMin; + } + + /// Explicitly defined bounds of spinning + /** + The \p libcds library never calls this ctor. + */ + exponential( + size_t nExpMin, ///< Minimum spinning + size_t nExpMax ///< Maximum spinning + ) + : m_nExpMin( nExpMin ) + , m_nExpMax( nExpMax ) + { + m_nExpCur = m_nExpMin; + } + + //@cond + void operator ()() + { + if ( m_nExpCur <= m_nExpMax ) { + for ( size_t n = 0; n < m_nExpCur; ++n ) + m_bkSpin(); + m_nExpCur *= 2; + } + else + m_bkYield(); + } + + template + bool operator()( Predicate pr ) + { + if ( m_nExpCur <= m_nExpMax ) { + for ( size_t n = 0; n < m_nExpCur; ++n ) { + if ( m_bkSpin(pr) ) + return true; + } + m_nExpCur *= 2; + } + else + return m_bkYield(pr); + return false; + } + + void reset() + { + m_nExpCur = m_nExpMin; + m_bkSpin.reset(); + m_bkYield.reset(); + } + //@endcond + }; + + //@cond + template + size_t exponential::s_nExpMin = 16; + + template + size_t exponential::s_nExpMax = 16 * 1024; + //@endcond + + /// Delay back-off strategy + /** + Template arguments: + - \p Duration - duration type, default is \p std::chrono::milliseconds + - \p Tag - a selector tag + + Choosing the best value for th timeout is platform and task specific. + In this implementation, the default values for timeout is statically + declared so you can set its value globally for your platform. + The second template argument, \p Tag, is used to separate implementation. 
For + example, you may define two \p delay back-offs for 5 and 10 ms timeout: + \code + + #include + namespace bkoff = cds::backoff; + + struct ms5 ; // tag to select 5ms + struct ms10 ; // tag to select 10ms + + // // define your back-off specialization + typedef bkoff::delay delay5; + typedef bkoff::delay delay10; + + // // set up the timeouts + delay5::s_nTimeout = 5; + delay10::s_nTimeout = 10; + \endcode + + Another way of solving this problem is subclassing \p delay back-off class: + \code + #include + namespace bkoff = cds::backoff; + typedef bkoff::delay<> delay_bkoff; + + class delay5: public delay_bkoff { + public: + delay5(): delay_bkoff( 5 ) {} + }; + + class delay10: public delay_bkoff { + public: + delay10(): delay_bkoff( 10 ) {} + }; + \endcode + + */ + template + class delay + { + public: + typedef Duration duration_type; ///< Duration type (default \p std::chrono::milliseconds) + static unsigned int s_nTimeout; ///< default timeout, =5 + + protected: + ///@cond + unsigned int const m_nTimeout; + ///@endcond + + public: + /// Default ctor takes the timeout from s_nTimeout + delay() + : m_nTimeout( s_nTimeout ) + {} + + /// Initializes timeout from \p nTimeout + CDS_CONSTEXPR delay( unsigned int nTimeout ) + : m_nTimeout( nTimeout ) + {} + + //@cond + void operator()() const + { + cds_std::this_thread::sleep_for( duration_type( m_nTimeout )); + } + + template + bool operator()( Predicate pr ) const + { + for ( unsigned int i = 0; i < m_nTimeout; i += 2 ) { + if ( pr() ) + return true; + cds_std::this_thread::sleep_for( duration_type( 2 )); + } + return false; + } + + void reset() const + {} + //@endcond + }; + + //@cond + template + unsigned int delay::s_nTimeout = 5; + //@endcond + + + /// Delay back-off strategy, template version + /** + This is a template version of backoff::delay class. + Template parameter \p Timeout sets a delay timeout. + The declaration cds::backoff::delay_of< 5 > bkoff is equal for + cds::backoff::delay<> bkoff(5). 
+ */ + template + class delay_of: public delay + { + //@cond + typedef delay base_class; + public: + delay_of() + : base_class( Timeout ) + {} + //@endcond + }; + + + /// Default backoff strategy + typedef exponential Default; + + /// Default back-off strategy for lock primitives + typedef exponential LockDefault; + + } // namespace backoff +} // namespace cds + + +#endif // #ifndef __CDS_BACKOFF_STRATEGY_H diff --git a/cds/bitop.h b/cds/bitop.h new file mode 100644 index 00000000..2ab7a5ef --- /dev/null +++ b/cds/bitop.h @@ -0,0 +1,140 @@ +//$$CDS-header$$ + +#ifndef __CDS_BITOP_H +#define __CDS_BITOP_H + +/* + Different bit algorithms: + LSB get least significant bit number + MSB get most significant bit number + bswap swap byte order of word + RBO reverse bit order of word + + Editions: + 2007.10.08 Maxim.Khiszinsky Created +*/ + +#include +#include + +namespace cds { + /// Bit operations + namespace bitop { + + ///@cond none + namespace details { + template struct BitOps; + + // 32-bit bit ops + template <> struct BitOps<4> { + typedef atomic32u_t TUInt; + + static int MSB( TUInt x ) { return bitop::platform::msb32( x ); } + static int LSB( TUInt x ) { return bitop::platform::lsb32( x ); } + static int MSBnz( TUInt x ) { return bitop::platform::msb32nz( x ); } + static int LSBnz( TUInt x ) { return bitop::platform::lsb32nz( x ); } + static int SBC( TUInt x ) { return bitop::platform::sbc32( x ) ; } + static int ZBC( TUInt x ) { return bitop::platform::zbc32( x ) ; } + + static TUInt RBO( TUInt x ) { return bitop::platform::rbo32( x ); } + static bool complement( TUInt& x, int nBit ) { return bitop::platform::complement32( &x, nBit ); } + + static TUInt RandXorShift(TUInt x) { return bitop::platform::RandXorShift32(x); } + }; + + // 64-bit bit ops + template <> struct BitOps<8> { + typedef atomic64u_unaligned TUInt; + + static int MSB( TUInt x ) { return bitop::platform::msb64( x ); } + static int LSB( TUInt x ) { return bitop::platform::lsb64( x ); } + static int MSBnz( TUInt x ) { return bitop::platform::msb64nz( x ); } + static int LSBnz( TUInt x ) { return bitop::platform::lsb64nz( x ); } + static int SBC( TUInt x ) { return bitop::platform::sbc64( x ) ; } + static int ZBC( TUInt x ) { return bitop::platform::zbc64( x ) ; } + + static TUInt RBO( TUInt x ) { return bitop::platform::rbo64( x ); } + static bool complement( TUInt& x, int nBit ) { return bitop::platform::complement64( &x, nBit ); } + + static TUInt RandXorShift(TUInt x) { return bitop::platform::RandXorShift64(x); } + }; + } // namespace details + //@endcond + + + /// Get least significant bit (LSB) number (1..32/64), 0 if nArg == 0 + template + static inline int LSB( T nArg ) + { + return details::BitOps< sizeof(T) >::LSB( (typename details::BitOps::TUInt) nArg ); + } + + /// Get least significant bit (LSB) number (0..31/63) + /** + Precondition: nArg != 0 + */ + template + static inline int LSBnz( T nArg ) + { + assert( nArg != 0 ); + return details::BitOps< sizeof(T) >::LSBnz( (typename details::BitOps::TUInt) nArg ); + } + + /// Get most significant bit (MSB) number (1..32/64), 0 if nArg == 0 + template + static inline int MSB( T nArg ) + { + return details::BitOps< sizeof(T) >::MSB( (typename details::BitOps::TUInt) nArg ); + } + + /// Get most significant bit (MSB) number (0..31/63) + /** + Precondition: nArg != 0 + */ + template + static inline int MSBnz( T nArg ) + { + assert( nArg != 0 ); + return details::BitOps< sizeof(T) >::MSBnz( (typename details::BitOps::TUInt) nArg ); + } + + /// Get non-zero bit count of 
a word + template + static inline int SBC( T nArg ) + { + return details::BitOps< sizeof(T) >::SBC( (typename details::BitOps::TUInt) nArg ); + } + + /// Get zero bit count of a word + template + static inline int ZBC( T nArg ) + { + return details::BitOps< sizeof(T) >::ZBC( (typename details::BitOps::TUInt) nArg ); + } + + /// Reverse bit order of \p nArg + template + static inline T RBO( T nArg ) + { + return (T) details::BitOps< sizeof(T) >::RBO( (typename details::BitOps::TUInt) nArg ); + } + + /// Complement bit \p nBit in \p nArg + template + static inline bool complement( T& nArg, int nBit ) + { + return details::BitOps< sizeof(T) >::complement( reinterpret_cast< typename details::BitOps::TUInt& >( nArg ), nBit ); + } + + /// Simple random number generator + template + static inline T RandXorShift( T x) + { + return (T) details::BitOps< sizeof(T) >::RandXorShift(x); + } + + } // namespace bitop +} //namespace cds + +#endif // #ifndef __CDS_BITOP_H + diff --git a/cds/compiler/backoff.h b/cds/compiler/backoff.h new file mode 100644 index 00000000..f8ec9e7d --- /dev/null +++ b/cds/compiler/backoff.h @@ -0,0 +1,32 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_BACKOFF_IMPL_H +#define __CDS_COMPILER_BACKOFF_IMPL_H + +#if CDS_COMPILER == CDS_COMPILER_MSVC || (CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS) +# if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64 +# include +# else +# error "MS VC++ compiler: unsupported processor architecture" +# endif +#elif CDS_COMPILER == CDS_COMPILER_GCC || CDS_COMPILER == CDS_COMPILER_CLANG || CDS_COMPILER == CDS_COMPILER_INTEL +# if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_IA64 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_SPARC +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_PPC64 +# include +//# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_ARM7 +//# include +# endif +#else +# error "Undefined compiler" +#endif + +#endif // #ifndef __CDS_COMPILER_BACKOFF_IMPL_H diff --git a/cds/compiler/bitop.h b/cds/compiler/bitop.h new file mode 100644 index 00000000..42ced24a --- /dev/null +++ b/cds/compiler/bitop.h @@ -0,0 +1,40 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_BITOP_H +#define __CDS_COMPILER_BITOP_H + +// Choose appropriate header for current architecture and compiler + +#if CDS_COMPILER == CDS_COMPILER_MSVC || (CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS) +/************************************************************************/ +/* MS Visual C++ */ +/************************************************************************/ + +# if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64 +# include +# endif + +#elif CDS_COMPILER == CDS_COMPILER_GCC || CDS_COMPILER == CDS_COMPILER_CLANG || CDS_COMPILER == CDS_COMPILER_INTEL +/************************************************************************/ +/* GCC */ +/************************************************************************/ + +# if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_SPARC +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_IA64 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_PPC64 +# include +# endif +#endif // Compiler choice + +// Generic (C) implementation +#include + +#endif // 
#ifndef __CDS_COMPILER_BITOP_H diff --git a/cds/compiler/clang/cxx11_atomic_prepatches.h b/cds/compiler/clang/cxx11_atomic_prepatches.h new file mode 100644 index 00000000..fb86722a --- /dev/null +++ b/cds/compiler/clang/cxx11_atomic_prepatches.h @@ -0,0 +1,54 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_CLANG_CXX11_ATOMIC_PREPATCHES_H +#define __CDS_COMPILER_CLANG_CXX11_ATOMIC_PREPATCHES_H + +#if CDS_CXX11_ATOMIC_SUPPORT == 1 +# if CDS_COMPILER_VERSION == 30100 + // Clang 3.1 workaround + // Clang 3.1 does not support __atomic_is_lock_free intrinsic function from GCC + template + static inline bool __atomic_is_lock_free( size_t, T* ) CDS_NOEXCEPT + { + return sizeof(T) <= 8; + } + + // clang does not implement GCC 4.7 low-level __atomic_xxx intrinsics + // See http://comments.gmane.org/gmane.comp.compilers.clang.devel/20093 + + // In Clang 3.1 the following workaround results in + // Assertion `Proto && "Functions without a prototype cannot be overloaded"' failed + // and clang crashed + // So, we cannot use Clang with GCC 4.7 atomics +/* + template + static inline void __atomic_load(T *ptr, T *ret, int memmodel) + { + *ret = __atomic_load( ptr, memmodel ); + } + + template + static inline void __atomic_store(T *ptr, T *val, int memmodel) + { + __atomic_store( ptr, *val, memmodel ); + } + + template + static inline void __atomic_exchange(T *ptr, T *val, T *ret, int memmodel) + { + *ret = __atomic_exchange( ptr, *val, memmodel ); + } + + template + static inline bool __atomic_compare_exchange(T *ptr, T *expected, T *desired, bool weak, int success_memmodel, int failure_memmodel) + { + if ( weak ) + return __atomic_compare_exchange_weak( ptr, expected, *desired, success_memmodel, failure_memmodel ); + else + return __atomic_compare_exchange_strong( ptr, expected, *desired, success_memmodel, failure_memmodel ); + } +*/ +# endif +#endif + +#endif // #ifndef __CDS_COMPILER_CLANG_CXX11_ATOMIC_PREPATCHES_H diff --git a/cds/compiler/clang/defs.h b/cds/compiler/clang/defs.h new file mode 100644 index 00000000..0c2fe854 --- /dev/null +++ b/cds/compiler/clang/defs.h @@ -0,0 +1,128 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_CLANG_DEFS_H +#define __CDS_COMPILER_CLANG_DEFS_H + +// Compiler version +#define CDS_COMPILER_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) + +// Compiler name +#define CDS_COMPILER__NAME ("clang " __clang_version__) +#define CDS_COMPILER__NICK "clang" + +// C++11 atomic support - only for libc++ +// Note: Clang libc++ atomic leads to program crash. 
+// So, we use libcds atomic implementation +//#if __has_feature(cxx_atomic) && defined(_LIBCPP_VERSION) +//# define CDS_CXX11_ATOMIC_SUPPORT 1 +//#endif + + +#include + + +#define alignof __alignof__ + +// Variadic template support (only if -std=c++0x compile-line option provided) +#if __has_feature(cxx_variadic_templates) +# define CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT +#endif + +// Default template arguments for function templates +#if __has_feature(cxx_default_function_template_args) +# define CDS_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS_SUPPORT +#endif + +#if __has_feature(cxx_deleted_functions) +// C++11 delete definition ( function declaration = delete) +# define CDS_CXX11_DELETE_DEFINITION_SUPPORT +#endif + +#if __has_feature(cxx_defaulted_functions) +// C++11 explicitly-defaulted function (= default) [std 8.4.2 [dcl.fct.def.default]] +# define CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT +#endif + +// Explicit conversion operators +#if __has_feature(cxx_explicit_conversions) +# define CDS_CXX11_EXPLICIT_CONVERSION_OPERATOR_SUPPORT +#endif + +// C++11 template alias +#if __has_feature(cxx_alias_templates) +# define CDS_CXX11_TEMPLATE_ALIAS_SUPPORT +#endif + +// C++11 inline namespace +#if __has_feature(cxx_inline_namespaces) +# define CDS_CXX11_INLINE_NAMESPACE_SUPPORT +#endif + +// Lambda +#if __has_feature(cxx_lambdas) +# define CDS_CXX11_LAMBDA_SUPPORT +#endif + +// RValue +#if __has_feature(cxx_rvalue_references) +# define CDS_RVALUE_SUPPORT +# define CDS_MOVE_SEMANTICS_SUPPORT +#endif + +#if __has_feature(cxx_constexpr) +# define CDS_CONSTEXPR constexpr +# define CDS_CONSTEXPR_CONST constexpr const +#else +# define CDS_CONSTEXPR +# define CDS_CONSTEXPR_CONST const +#endif + +#if __has_feature(cxx_noexcept) +# define CDS_NOEXCEPT_SUPPORT noexcept +# define CDS_NOEXCEPT_SUPPORT_(expr) noexcept(expr) +#else +# define CDS_NOEXCEPT_SUPPORT +# define CDS_NOEXCEPT_SUPPORT_(expr) +#endif + +// C++11 thread_local keyword +#if __has_feature(cxx_thread_local) // CLang 3.3 +# define CDS_CXX11_THREAD_LOCAL_SUPPORT +#endif + + +#include + +// Thread support library (thread, mutex, condition variable, chrono) +#if CDS_COMPILER_VERSION >= 30100 +# if __has_include() +# define CDS_CXX11_STDLIB_THREAD +# endif + +# if __has_include() +# define CDS_CXX11_STDLIB_CHRONO +# endif + +# if __has_include() +# define CDS_CXX11_STDLIB_MUTEX +# endif + +# if __has_include() +# define CDS_CXX11_STDLIB_CONDITION_VARIABLE +# endif +#endif + +// Full SFINAE support +#define CDS_CXX11_SFINAE + +// ************************************************* +// Alignment macro + +#define CDS_TYPE_ALIGNMENT(n) __attribute__ ((aligned (n))) +#define CDS_CLASS_ALIGNMENT(n) __attribute__ ((aligned (n))) +#define CDS_DATA_ALIGNMENT(n) __attribute__ ((aligned (n))) + + +#include + +#endif // #ifndef __CDS_COMPILER_GCC_DEFS_H diff --git a/cds/compiler/cstdint_boost.h b/cds/compiler/cstdint_boost.h new file mode 100644 index 00000000..cbaae293 --- /dev/null +++ b/cds/compiler/cstdint_boost.h @@ -0,0 +1,44 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_CSTDINT_BOOST_H +#define __CDS_COMPILER_CSTDINT_BOOST_H +//@cond + +#include + +namespace cds { + using boost::int_least8_t; + using boost::uint_least8_t; + using boost::int_least16_t; + using boost::uint_least16_t; + using boost::int_least32_t; + using boost::uint_least32_t; + using boost::int_least64_t; + using boost::uint_least64_t; + using boost::int_fast8_t; + using boost::uint_fast8_t; + using boost::int_fast16_t; + using boost::uint_fast16_t; + using boost::int_fast32_t; + 
using boost::uint_fast32_t; + using boost::int_fast64_t; + using boost::uint_fast64_t; + + using boost::intmax_t; + using boost::uintmax_t; + + using boost::int8_t; + using boost::uint8_t; + using boost::int16_t; + using boost::uint16_t; + using boost::int32_t; + using boost::uint32_t; + using boost::int64_t; + using boost::uint64_t; + + using ::intptr_t; + using ::uintptr_t; +} // namespace cds + +//@endcond +#endif // #ifndef __CDS_COMPILER_CSTDINT_BOOST_H diff --git a/cds/compiler/cstdint_std.h b/cds/compiler/cstdint_std.h new file mode 100644 index 00000000..c3919c6e --- /dev/null +++ b/cds/compiler/cstdint_std.h @@ -0,0 +1,43 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_CSTDINT_STD_H +#define __CDS_COMPILER_CSTDINT_STD_H +//@cond + +#include +namespace cds { + using std::int_least8_t; + using std::uint_least8_t; + using std::int_least16_t; + using std::uint_least16_t; + using std::int_least32_t; + using std::uint_least32_t; + using std::int_least64_t; + using std::uint_least64_t; + using std::int_fast8_t; + using std::uint_fast8_t; + using std::int_fast16_t; + using std::uint_fast16_t; + using std::int_fast32_t; + using std::uint_fast32_t; + using std::int_fast64_t; + using std::uint_fast64_t; + + using std::intmax_t; + using std::uintmax_t; + + using std::int8_t; + using std::uint8_t; + using std::int16_t; + using std::uint16_t; + using std::int32_t; + using std::uint32_t; + using std::int64_t; + using std::uint64_t; + using std::intptr_t; + using std::uintptr_t; +} // namespace cds + + +//@endcond +#endif // #ifndef __CDS_COMPILER_CSTDINT_STD_H diff --git a/cds/compiler/cxx11_atomic.h b/cds/compiler/cxx11_atomic.h new file mode 100644 index 00000000..1adbb3e8 --- /dev/null +++ b/cds/compiler/cxx11_atomic.h @@ -0,0 +1,2277 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_CXX11_ATOMIC_H +#define __CDS_COMPILER_CXX11_ATOMIC_H +//@cond + +#include +#include + +namespace cds { namespace cxx11_atomics { + typedef enum memory_order { + memory_order_relaxed, + memory_order_consume, + memory_order_acquire, + memory_order_release, + memory_order_acq_rel, + memory_order_seq_cst + } memory_order; + +}} // namespace cds::cxx11_atomics + + +#if CDS_COMPILER == CDS_COMPILER_MSVC || (CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS) +# if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64 +# include +# else +# error "MS VC++ compiler: unsupported processor architecture" +# endif +#elif CDS_COMPILER == CDS_COMPILER_GCC || CDS_COMPILER == CDS_COMPILER_CLANG || CDS_COMPILER == CDS_COMPILER_INTEL +# if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_IA64 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_SPARC +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_PPC64 +# include +//# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_ARM7 +//# include +# else +# error "GCC compiler: unsupported processor architecture. 
Try to use native C++11 atomic or boost.atomic" +# endif +#else +# error "Undefined compiler" +#endif + +// In C++11, make_unsigned is declared in +#include // for make_unsigned + +namespace cds { namespace cxx11_atomics { + + // forward declarations + template + struct atomic; + + namespace details { + + template + struct atomic_generic_ops; + + template + struct atomic_integral_ops; + + template + struct primary_type; + + template <> + struct primary_type<1> + { + typedef cds::uint8_t type; + }; + template <> + struct primary_type<2> + { + typedef cds::uint16_t type; + }; + template <> + struct primary_type<4> + { + typedef cds::uint32_t type; + }; + template <> + struct primary_type<8> + { + typedef cds::uint64_t type; + }; + + template + struct make_atomic_primary + { + typedef T source_type; + typedef Primary primary_type; + + static primary_type volatile * ptr( source_type volatile * p ) CDS_NOEXCEPT + { + return reinterpret_cast(p); + } + static primary_type const volatile * ptr( source_type const volatile * p ) CDS_NOEXCEPT + { + return reinterpret_cast(p); + } + + static primary_type val( source_type v ) CDS_NOEXCEPT + { + return *reinterpret_cast(&v); + } + + static primary_type& ref( source_type& v ) CDS_NOEXCEPT + { + return reinterpret_cast(v); + } + + static primary_type const& ref( source_type const& v ) CDS_NOEXCEPT + { + return reinterpret_cast(v); + } + + static source_type ret( primary_type r ) CDS_NOEXCEPT + { + return *reinterpret_cast(&r); + } + }; + + template + struct make_atomic_primary + { + typedef T source_type; + typedef T primary_type; + + static primary_type volatile * ptr( source_type volatile * p ) CDS_NOEXCEPT + { + return p; + } + static primary_type const volatile * ptr( source_type const volatile * p ) CDS_NOEXCEPT + { + return p; + } + + static primary_type val( source_type v ) CDS_NOEXCEPT + { + return v; + } + + static primary_type& ref( source_type& v ) CDS_NOEXCEPT + { + return v; + } + + static source_type ret( primary_type r ) CDS_NOEXCEPT + { + return r; + } + }; + + template + struct atomic_integral_bitwise_ops + { + public: + typedef typename boost::make_unsigned::type unsigned_type; + typedef atomic_generic_ops atomic_ops; + + static T fetch_and(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { + unsigned_type cur = atomic_ops::atomic_load_explicit( reinterpret_cast(pDest), memory_order_relaxed ); + do {} while ( !atomic_ops::atomic_compare_exchange_weak_explicit( + reinterpret_cast(pDest), &cur, cur & unsigned_type(val), order, memory_order_relaxed )); + return T(cur); + } + + static T fetch_or(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { + unsigned_type cur = atomic_ops::atomic_load_explicit( reinterpret_cast(pDest), memory_order_relaxed ); + do {} while ( !atomic_ops::atomic_compare_exchange_weak_explicit( + reinterpret_cast(pDest), &cur, cur | unsigned_type(val), order, memory_order_relaxed )); + return T(cur); + } + + static T fetch_xor(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { + unsigned_type cur = atomic_ops::atomic_load_explicit( reinterpret_cast(pDest), memory_order_relaxed ); + do {} while ( !atomic_ops::atomic_compare_exchange_weak_explicit( + reinterpret_cast(pDest), &cur, cur ^ unsigned_type(val), order, memory_order_relaxed )); + return T(cur); + } + }; + + + // 8-bit atomic operations + + template + struct atomic_generic_ops< T, 1, Primary > + { + typedef make_atomic_primary primary; + + // store + static void atomic_store_explicit( T volatile * pDest, T v, memory_order 
order ) CDS_NOEXCEPT + { + platform::store8( primary::ptr(pDest), primary::val(v), order ); + } + static void atomic_store_explicit( T * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + platform::store8( primary::ptr(pDest), primary::val(v), order ); + } + static void atomic_store( T volatile * pDest, T v ) CDS_NOEXCEPT + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + static void atomic_store( T * pDest, T v ) CDS_NOEXCEPT + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + + // load + static T atomic_load_explicit( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + return primary::ret( platform::load8( primary::ptr(pSrc), order )); + } + static T atomic_load_explicit( T const * pSrc, memory_order order ) CDS_NOEXCEPT + { + return primary::ret( platform::load8( primary::ptr(pSrc), order )); + } + static T atomic_load( T volatile const * pSrc ) CDS_NOEXCEPT + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + static T atomic_load( T const * pSrc ) CDS_NOEXCEPT + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + + // exchange + static T atomic_exchange_explicit( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT + { + return primary::ret( platform::exchange8( primary::ptr(pDest), primary::val(val), order )); + } + static T atomic_exchange_explicit( T * pDest, T val, memory_order order ) CDS_NOEXCEPT + { + return primary::ret( platform::exchange8( primary::ptr(pDest), primary::val(val), order )); + } + static T atomic_exchange( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_exchange( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + + // cas + static bool atomic_compare_exchange_weak_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas8_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas8_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak( T volatile * pDest, T * expected, T desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_weak( T * pDest, T * expected, T desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas8_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas8_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool 
atomic_compare_exchange_strong( T volatile * pDest, T * expected, T desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong( T * pDest, T * expected, T desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + }; + + template + struct atomic_integral_ops< T, 1 > + : atomic_generic_ops + , atomic_integral_bitwise_ops + { + typedef atomic_integral_bitwise_ops bitwise_ops; + + // fetch_add + static T atomic_fetch_add_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch8_add_defined + return platform::fetch8_add( pDest, val, order ); +# else + T cur = atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !atomic_compare_exchange_weak_explicit( pDest, &cur, cur + val, order, memory_order_relaxed )); + return cur; +# endif + } + static T atomic_fetch_add_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_add_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_add( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_add( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_sub + static T atomic_fetch_sub_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch8_sub_defined + return platform::fetch8_sub( pDest, val, order ); +# else + T cur = atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !atomic_compare_exchange_weak_explicit( pDest, &cur, cur - val, order, memory_order_relaxed )); + return cur; +# endif + } + static T atomic_fetch_sub_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_sub_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_sub( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_sub( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_and + static T atomic_fetch_and_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch8_and_defined + return platform::fetch8_and( pDest, val, order ); +# else + return bitwise_ops::fetch_and( pDest, val, order ); +# endif + } + static T atomic_fetch_and_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_and_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_and( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_and( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_or + static T atomic_fetch_or_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch8_or_defined + return platform::fetch8_or( pDest, val, order ); +# else + return bitwise_ops::fetch_or( pDest, val, order ); +# endif + } + static T atomic_fetch_or_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_or_explicit( reinterpret_cast( 
pDest ), val, order ); + } + static T atomic_fetch_or( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_or( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_xor + static T atomic_fetch_xor_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch8_xor_defined + return platform::fetch8_xor( pDest, val, order ); +# else + return bitwise_ops::fetch_xor( pDest, val, order ); +# endif + } + static T atomic_fetch_xor_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_xor_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_xor( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_xor( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); + } + }; + + // 16-bit atomic operations + + template + struct atomic_generic_ops< T, 2, Primary > + { + typedef make_atomic_primary primary; + + // store + static void atomic_store_explicit( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + platform::store16( primary::ptr(pDest), primary::val(v), order ); + } + static void atomic_store_explicit( T * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + platform::store16( primary::ptr(pDest), primary::val(v), order ); + } + static void atomic_store( T volatile * pDest, T v ) CDS_NOEXCEPT + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + static void atomic_store( T * pDest, T v ) CDS_NOEXCEPT + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + + // load + static T atomic_load_explicit( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + return primary::ret( platform::load16( primary::ptr(pSrc), order )); + } + static T atomic_load_explicit( T const * pSrc, memory_order order ) CDS_NOEXCEPT + { + return primary::ret( platform::load16( primary::ptr(pSrc), order )); + } + static T atomic_load( T volatile const * pSrc ) CDS_NOEXCEPT + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + static T atomic_load( T const * pSrc ) CDS_NOEXCEPT + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + + // exchange + static T atomic_exchange_explicit( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT + { + return primary::ret( platform::exchange16( primary::ptr(pDest), primary::val(val), order )); + } + static T atomic_exchange_explicit( T * pDest, T val, memory_order order ) CDS_NOEXCEPT + { + return primary::ret( platform::exchange16( primary::ptr(pDest), primary::val(val), order )); + } + static T atomic_exchange( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_exchange( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + + // cas + static bool atomic_compare_exchange_weak_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas16_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak_explicit( T * pDest, T * expected, T desired, memory_order mo_success, 
memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas16_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak( T volatile * pDest, T * expected, T desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_weak_explicit( pDest, expected, primary::val(desired), memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_weak( T * pDest, T * expected, T desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas16_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas16_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong( T volatile * pDest, T * expected, T desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong( T * pDest, T * expected, T desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + }; + + template + struct atomic_integral_ops< T, 2 > + : atomic_generic_ops< T, 2, T > + , atomic_integral_bitwise_ops + { + typedef atomic_integral_bitwise_ops bitwise_ops; + + // fetch_add + static T atomic_fetch_add_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch16_add_defined + return platform::fetch16_add( pDest, val, order ); +# else + T cur = atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !atomic_compare_exchange_weak_explicit( pDest, &cur, cur + val, order, memory_order_relaxed )); + return cur; +# endif + } + static T atomic_fetch_add_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_add_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_add( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_add( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_sub + static T atomic_fetch_sub_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch16_sub_defined + return platform::fetch16_sub( pDest, val, order ); +# else + T cur = atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !atomic_compare_exchange_weak_explicit( pDest, &cur, cur - val, order, memory_order_relaxed )); + return cur; +# endif + } + static T atomic_fetch_sub_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_sub_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_sub( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_sub_explicit( pDest, val, 
memory_order_seq_cst ); + } + static T atomic_fetch_sub( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_and + static T atomic_fetch_and_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch16_and_defined + return platform::fetch16_and( pDest, val, order ); +# else + return bitwise_ops::fetch_and( pDest, val, order ); +# endif + } + static T atomic_fetch_and_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_and_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_and( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_and( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_or + static T atomic_fetch_or_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch16_or_defined + return platform::fetch16_or( pDest, val, order ); +# else + return bitwise_ops::fetch_or( pDest, val, order ); +# endif + } + static T atomic_fetch_or_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_or_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_or( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_or( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_xor + static T atomic_fetch_xor_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch16_xor_defined + return platform::fetch16_xor( pDest, val, order ); +# else + return bitwise_ops::fetch_xor( pDest, val, order ); +# endif + } + static T atomic_fetch_xor_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_xor_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_xor( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_xor( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); + } + }; + + // 32-bit atomic operations + + template + struct atomic_generic_ops< T, 4, Primary > + { + typedef make_atomic_primary primary; + + // store + static void atomic_store_explicit( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + platform::store32( primary::ptr(pDest), primary::val(v), order ); + } + static void atomic_store_explicit( T * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + platform::store32( primary::ptr(pDest), primary::val(v), order ); + } + static void atomic_store( T volatile * pDest, T v ) CDS_NOEXCEPT + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + static void atomic_store( T * pDest, T v ) CDS_NOEXCEPT + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + + // load + static T atomic_load_explicit( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + return primary::ret( platform::load32( primary::ptr(pSrc), order )); + } + static T atomic_load_explicit( T const * pSrc, memory_order order ) CDS_NOEXCEPT + { + return primary::ret( platform::load32( primary::ptr(pSrc), order )); + } + static T atomic_load( T volatile const * pSrc ) 
CDS_NOEXCEPT + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + static T atomic_load( T const * pSrc ) CDS_NOEXCEPT + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + + // exchange + static T atomic_exchange_explicit( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT + { + return primary::ret( platform::exchange32( primary::ptr(pDest), primary::val(val), order )); + } + static T atomic_exchange_explicit( T * pDest, T val, memory_order order ) CDS_NOEXCEPT + { + return primary::ret( platform::exchange32( primary::ptr(pDest), primary::val(val), order )); + } + static T atomic_exchange( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_exchange( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + + // cas + static bool atomic_compare_exchange_weak_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas32_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas32_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak( T volatile * pDest, T * expected, T desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_weak( T * pDest, T * expected, T desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas32_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas32_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong( T volatile * pDest, T * expected, T desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong( T * pDest, T * expected, T desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + }; + + template + struct atomic_integral_ops< T, 4 > + : atomic_generic_ops< T, 4, T > + , atomic_integral_bitwise_ops + { + typedef atomic_integral_bitwise_ops bitwise_ops; + // fetch_add + static T atomic_fetch_add_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch32_add_defined + return platform::fetch32_add( pDest, val, order ); +# else + T cur = atomic_load_explicit( pDest, memory_order_relaxed ); + 
do {} while ( !atomic_compare_exchange_weak_explicit( pDest, &cur, cur + val, order, memory_order_relaxed )); + return cur; +# endif + } + static T atomic_fetch_add_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_add_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_add( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_add( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_sub + static T atomic_fetch_sub_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch32_sub_defined + return platform::fetch32_sub( pDest, val, order ); +# else + T cur = atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !atomic_compare_exchange_weak_explicit( pDest, &cur, cur - val, order, memory_order_relaxed )); + return cur; +# endif + } + static T atomic_fetch_sub_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_sub_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_sub( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_sub( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_and + static T atomic_fetch_and_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch32_and_defined + return platform::fetch32_and( pDest, val, order ); +# else + return bitwise_ops::fetch_and( pDest, val, order ); +# endif + } + static T atomic_fetch_and_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_and_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_and( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_and( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_or + static T atomic_fetch_or_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch32_or_defined + return platform::fetch32_or( pDest, val, order ); +# else + return bitwise_ops::fetch_or( pDest, val, order ); +# endif + } + static T atomic_fetch_or_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_or_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_or( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_or( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_xor + static T atomic_fetch_xor_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch32_xor_defined + return platform::fetch32_xor( pDest, val, order ); +# else + return bitwise_ops::fetch_xor( pDest, val, order ); +# endif + } + static T atomic_fetch_xor_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_xor_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_xor( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_xor_explicit( 
pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_xor( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); + } + }; + + + // 64-bit atomic operations + + template + struct atomic_generic_ops< T, 8, Primary > + { + typedef make_atomic_primary primary; + + // store + static void atomic_store_explicit( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + platform::store64( primary::ptr(pDest), primary::val(v), order ); + } + static void atomic_store_explicit( T * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + platform::store64( primary::ptr(pDest), primary::val(v), order ); + } + static void atomic_store( T volatile * pDest, T v ) CDS_NOEXCEPT + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + static void atomic_store( T * pDest, T v ) CDS_NOEXCEPT + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + + // load + static T atomic_load_explicit( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + return primary::ret( platform::load64( primary::ptr(pSrc), order )); + } + static T atomic_load_explicit( T const * pSrc, memory_order order ) CDS_NOEXCEPT + { + return primary::ret( platform::load64( primary::ptr(pSrc), order )); + } + static T atomic_load( T volatile const * pSrc ) CDS_NOEXCEPT + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + static T atomic_load( T const * pSrc ) CDS_NOEXCEPT + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + + // exchange + static T atomic_exchange_explicit( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT + { + return primary::ret( platform::exchange64( primary::ptr(pDest), primary::val(val), order )); + } + static T atomic_exchange_explicit( T * pDest, T val, memory_order order ) CDS_NOEXCEPT + { + return primary::ret( platform::exchange64( primary::ptr(pDest), primary::val(val), order )); + } + static T atomic_exchange( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_exchange( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + + // cas + static bool atomic_compare_exchange_weak_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas64_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas64_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak( T volatile * pDest, T * expected, T desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_weak( T * pDest, T * expected, T desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas64_strong( 
primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas64_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong( T volatile * pDest, T * expected, T desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong( T * pDest, T * expected, T desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + }; + + + template + struct atomic_integral_ops< T, 8 > + : atomic_generic_ops< T, 8, T > + , atomic_integral_bitwise_ops + { + typedef atomic_integral_bitwise_ops bitwise_ops; + typedef atomic_generic_ops general_ops; + + // fetch_add + static T atomic_fetch_add_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch64_add_defined + return platform::fetch64_add( pDest, val, order ); +# else + T cur = general_ops::atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !general_ops::atomic_compare_exchange_weak_explicit( pDest, &cur, cur + val, order, memory_order_relaxed )); + return cur; +# endif + } + static T atomic_fetch_add_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_add_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_add( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_add( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_sub + static T atomic_fetch_sub_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch64_sub_defined + return platform::fetch64_sub( pDest, val, order ); +# else + T cur = general_ops::atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !general_ops::atomic_compare_exchange_weak_explicit( pDest, &cur, cur - val, order, memory_order_relaxed )); + return cur; +# endif + } + static T atomic_fetch_sub_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_sub_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_sub( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_sub( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_and + static T atomic_fetch_and_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch64_and_defined + return platform::fetch64_and( pDest, val, order ); +# else + return bitwise_ops::fetch_and( pDest, val, order ); +# endif + } + static T atomic_fetch_and_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_and_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_and( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_and( 
T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_or + static T atomic_fetch_or_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch64_or_defined + return platform::fetch64_or( pDest, val, order ); +# else + return bitwise_ops::fetch_or( pDest, val, order ); +# endif + } + static T atomic_fetch_or_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_or_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_or( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_or( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_xor + static T atomic_fetch_xor_explicit(T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch64_xor_defined + return platform::fetch64_xor( pDest, val, order ); +# else + return bitwise_ops::fetch_xor( pDest, val, order ); +# endif + } + static T atomic_fetch_xor_explicit(T * pDest, T val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_xor_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_xor( T volatile * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_xor( T * pDest, T val ) CDS_NOEXCEPT + { + return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); + } + }; + + + // atomic pointer operations + template + struct atomic_pointer_base + { + // store + static void atomic_store_explicit( T * volatile * pDest, T * v, memory_order order ) CDS_NOEXCEPT + { + platform::store_ptr( pDest, v, order ); + } + static void atomic_store_explicit( T * * pDest, T * v, memory_order order ) CDS_NOEXCEPT + { + platform::store_ptr( pDest, v, order ); + } + static void atomic_store( T * volatile * pDest, T * v ) CDS_NOEXCEPT + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + static void atomic_store( T * * pDest, T * v ) CDS_NOEXCEPT + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + + // load + static T * atomic_load_explicit( T * volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + return platform::load_ptr( pSrc, order ); + } + static T * atomic_load_explicit( T * const * pSrc, memory_order order ) CDS_NOEXCEPT + { + return platform::load_ptr( pSrc, order ); + } + static T * atomic_load( T * volatile const * pSrc ) CDS_NOEXCEPT + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + static T * atomic_load( T * const * pSrc ) CDS_NOEXCEPT + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + + // exchange + static T * atomic_exchange_explicit( T * volatile * pDest, T * val, memory_order order ) CDS_NOEXCEPT + { + return platform::exchange_ptr( pDest, val, order ); + } + static T * atomic_exchange_explicit( T * * pDest, T * val, memory_order order ) CDS_NOEXCEPT + { + return platform::exchange_ptr( pDest, val, order ); + } + static T * atomic_exchange( T * volatile * pDest, T * val ) CDS_NOEXCEPT + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + static T * atomic_exchange( T * * pDest, T * val ) CDS_NOEXCEPT + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + + // cas + static bool atomic_compare_exchange_weak_explicit( T * volatile * pDest, T * * 
expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas_ptr_weak( pDest, *expected, desired, mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak_explicit( T * * pDest, T * * expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas_ptr_weak( pDest, *expected, desired, mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak( T * volatile * pDest, T ** expected, T * desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_weak( T ** pDest, T ** expected, T * desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong_explicit( T * volatile * pDest, T ** expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas_ptr_strong( pDest, *expected, desired, mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong_explicit( T ** pDest, T ** expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + assert( expected != NULL ); + return platform::cas_ptr_strong( pDest, *expected, desired, mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong( T * volatile * pDest, T ** expected, T * desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong( T ** pDest, T ** expected, T * desired ) CDS_NOEXCEPT + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + }; + + template + struct atomic_pointer: public atomic_pointer_base + { + typedef atomic_pointer_base base_class; + // fetch_add + static T * atomic_fetch_add_explicit(T * volatile * pDest, ptrdiff_t val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch_ptr_add_defined + platform::fetch_ptr_add( pDest, val, order ); +# else + T * cur = base_class::atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !base_class::atomic_compare_exchange_weak_explicit( pDest, &cur, cur + val, order, memory_order_relaxed )); + return cur; +# endif + } + static T * atomic_fetch_add_explicit(T * * pDest, ptrdiff_t val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_add_explicit( reinterpret_cast( pDest ), val, order ); + } + static T * atomic_fetch_add( T * volatile * pDest, ptrdiff_t val ) CDS_NOEXCEPT + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + static T * atomic_fetch_add( T ** pDest, ptrdiff_t val ) CDS_NOEXCEPT + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_sub + static T * atomic_fetch_sub_explicit(T * volatile * pDest, ptrdiff_t val, memory_order order) CDS_NOEXCEPT + { +# ifdef CDS_ATOMIC_fetch_ptr_sub_defined + platform::fetch_ptr_sub( pDest, val, order ); +# else + T * cur = base_class::atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !base_class::atomic_compare_exchange_weak_explicit( pDest, &cur, cur - val, order, memory_order_relaxed )); + return cur; +# endif + } + static T * atomic_fetch_sub_explicit(T ** 
pDest, ptrdiff_t val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_sub_explicit( reinterpret_cast( pDest ), val, order ); + } + static T * atomic_fetch_sub( T volatile * pDest, ptrdiff_t val ) CDS_NOEXCEPT + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + static T * atomic_fetch_sub( T * pDest, ptrdiff_t val ) CDS_NOEXCEPT + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + }; + + template <> + struct atomic_pointer: public atomic_pointer_base + { + typedef atomic_pointer_base base_class; + + // fetch_add + static void * atomic_fetch_add_explicit(void * volatile * pDest, ptrdiff_t val, memory_order order) CDS_NOEXCEPT + { + void * cur = base_class::atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !base_class::atomic_compare_exchange_weak_explicit( pDest, &cur, reinterpret_cast(cur) + val, order, memory_order_relaxed )); + return cur; + } + static void * atomic_fetch_add_explicit(void * * pDest, ptrdiff_t val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_add_explicit( reinterpret_cast( pDest ), val, order ); + } + static void * atomic_fetch_add( void * volatile * pDest, ptrdiff_t val ) CDS_NOEXCEPT + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + static void * atomic_fetch_add( void ** pDest, ptrdiff_t val ) CDS_NOEXCEPT + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_sub + static void * atomic_fetch_sub_explicit(void * volatile * pDest, ptrdiff_t val, memory_order order) CDS_NOEXCEPT + { + void * cur = base_class::atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !base_class::atomic_compare_exchange_weak_explicit( pDest, &cur, reinterpret_cast(cur) - val, order, memory_order_relaxed )); + return cur; + } + static void * atomic_fetch_sub_explicit(void ** pDest, ptrdiff_t val , memory_order order) CDS_NOEXCEPT + { + return atomic_fetch_sub_explicit( reinterpret_cast( pDest ), val, order ); + } + static void * atomic_fetch_sub( void * volatile * pDest, ptrdiff_t val ) CDS_NOEXCEPT + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + static void * atomic_fetch_sub( void ** pDest, ptrdiff_t val ) CDS_NOEXCEPT + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + }; + +#ifndef CDS_CXX11_DELETE_DEFINITION_SUPPORT + class atomic_noncopyable + { + private: + atomic_noncopyable(const atomic_noncopyable&); + atomic_noncopyable& operator=(const atomic_noncopyable&); + //atomic_noncopyable& operator=(const atomic_noncopyable&) volatile; + protected: +# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT + atomic_noncopyable() = default; +# else + atomic_noncopyable() + {} +# endif + }; +#endif + + template + struct atomic_integral +#ifndef CDS_CXX11_DELETE_DEFINITION_SUPPORT + : atomic_noncopyable +#endif + { + private: + typename cds::details::aligned_type::type volatile m_val; + //T volatile m_val; + typedef atomic_integral_ops atomic_ops; + public: + typedef T atomic_type; + public: + bool is_lock_free() const volatile CDS_NOEXCEPT + { + return true; + } + bool is_lock_free() const CDS_NOEXCEPT + { + return true; + } + void store(T val, memory_order order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + atomic_ops::atomic_store_explicit( &m_val, val, order ); + } + void store(T val, memory_order order = memory_order_seq_cst) CDS_NOEXCEPT + { + atomic_ops::atomic_store_explicit( &m_val, val, order ); + } + + T load(memory_order order 
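The atomic_pointer / atomic_pointer_base operations above are the primitives most lock-free containers in this library are ultimately built on. As an illustration only (not part of the patch; node and push are hypothetical names), here is the classic Treiber-stack push expressed with the equivalent std::atomic<T*> interface; the fact that compare_exchange_weak refreshes its expected argument on failure is what keeps the retry loop this compact.

    #include <atomic>

    struct node {
        int   value;
        node* next;
    };

    // Push onto an intrusive lock-free stack: publish the node with a
    // release CAS; on failure the current head is written back into
    // n->next, so the loop retries with an up-to-date snapshot.
    inline void push( std::atomic<node*>& head, node* n )
    {
        n->next = head.load( std::memory_order_relaxed );
        while ( !head.compare_exchange_weak( n->next, n,
                                             std::memory_order_release,
                                             std::memory_order_relaxed ))
        {}
    }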
= memory_order_seq_cst) const volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_load_explicit( &m_val, order ); + } + T load(memory_order order = memory_order_seq_cst) const CDS_NOEXCEPT + { + return atomic_ops::atomic_load_explicit( &m_val, order ); + } + + operator T() const volatile CDS_NOEXCEPT + { + return load(); + } + operator T() const CDS_NOEXCEPT + { + return load(); + } + + T exchange(T val, memory_order order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_exchange_explicit( &m_val, val, order ); + } + T exchange(T val, memory_order order = memory_order_seq_cst) CDS_NOEXCEPT + { + return atomic_ops::atomic_exchange_explicit( &m_val, val, order ); + } + + bool compare_exchange_weak(T& expected, T desired , memory_order success_order, memory_order failure_order) volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_compare_exchange_weak_explicit( &m_val, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_weak(T& expected, T desired , memory_order success_order, memory_order failure_order) CDS_NOEXCEPT + { + return atomic_ops::atomic_compare_exchange_weak_explicit( &m_val, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_strong(T& expected, T desired , memory_order success_order, memory_order failure_order) volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_compare_exchange_strong_explicit( &m_val, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_strong(T& expected, T desired , memory_order success_order, memory_order failure_order) CDS_NOEXCEPT + { + return atomic_ops::atomic_compare_exchange_strong_explicit( &m_val, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_weak(T& expected, T desired , memory_order success_order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return compare_exchange_weak( expected, desired, success_order, memory_order_relaxed ); + } + bool compare_exchange_weak(T& expected, T desired , memory_order success_order = memory_order_seq_cst) CDS_NOEXCEPT + { + return compare_exchange_weak( expected, desired, success_order, memory_order_relaxed ); + } + bool compare_exchange_strong(T& expected, T desired , memory_order success_order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return compare_exchange_strong( expected, desired, success_order, memory_order_relaxed ); + } + bool compare_exchange_strong(T& expected, T desired , memory_order success_order = memory_order_seq_cst) CDS_NOEXCEPT + { + return compare_exchange_strong( expected, desired, success_order, memory_order_relaxed ); + } + + T fetch_add(T val, memory_order order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_fetch_add_explicit( &m_val, val, order ); + } + T fetch_add(T val, memory_order order = memory_order_seq_cst) CDS_NOEXCEPT + { + return atomic_ops::atomic_fetch_add_explicit( &m_val, val, order ); + } + T fetch_sub(T val, memory_order order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_fetch_sub_explicit( &m_val, val, order ); + } + T fetch_sub(T val, memory_order order = memory_order_seq_cst) CDS_NOEXCEPT + { + return atomic_ops::atomic_fetch_sub_explicit( &m_val, val, order ); + } + T fetch_and(T val, memory_order order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_fetch_and_explicit( &m_val, val, order ); + } + T fetch_and(T val, memory_order order = memory_order_seq_cst) CDS_NOEXCEPT + { + return 
atomic_ops::atomic_fetch_and_explicit( &m_val, val, order ); + } + + T fetch_or(T val, memory_order order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_fetch_or_explicit( &m_val, val, order ); + } + T fetch_or(T val, memory_order order = memory_order_seq_cst) CDS_NOEXCEPT + { + return atomic_ops::atomic_fetch_or_explicit( &m_val, val, order ); + } + T fetch_xor(T val, memory_order order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_fetch_xor_explicit( &m_val, val, order ); + } + T fetch_xor(T val, memory_order order = memory_order_seq_cst) CDS_NOEXCEPT + { + return atomic_ops::atomic_fetch_xor_explicit( &m_val, val, order ); + } + +#ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT + atomic_integral() = default; +#else + atomic_integral() CDS_NOEXCEPT + {} +#endif + CDS_CONSTEXPR atomic_integral(T val) CDS_NOEXCEPT + : m_val(val) + {} + +#ifdef CDS_CXX11_DELETE_DEFINITION_SUPPORT + atomic_integral(const atomic_integral&) = delete; + atomic_integral& operator=(const atomic_integral&) = delete; + atomic_integral& operator=(const atomic_integral&) volatile = delete; +#endif + T operator=(T val) volatile CDS_NOEXCEPT + { + store(val); + return val; + } + T operator=(T val) CDS_NOEXCEPT + { + store(val); + return val; + } + + // Post inc/dec + T operator++(int) volatile CDS_NOEXCEPT + { + return fetch_add( 1 ); + } + T operator++(int) CDS_NOEXCEPT + { + return fetch_add( 1 ); + } + T operator--(int) volatile CDS_NOEXCEPT + { + return fetch_sub( 1 ); + } + T operator--(int) CDS_NOEXCEPT + { + return fetch_sub( 1 ); + } + + // Pre inc/dec + T operator++() volatile CDS_NOEXCEPT + { + return fetch_add( 1 ) + 1; + } + T operator++() CDS_NOEXCEPT + { + return fetch_add( 1 ) + 1; + } + T operator--() volatile CDS_NOEXCEPT + { + return fetch_sub( 1 ) - 1; + } + T operator--() CDS_NOEXCEPT + { + return fetch_sub( 1 ) - 1; + } + + // op= + T operator+=(T val) volatile CDS_NOEXCEPT + { + return fetch_add( val ) + val; + } + T operator+=(T val) CDS_NOEXCEPT + { + return fetch_add( val ) + val; + } + T operator-=(T val) volatile CDS_NOEXCEPT + { + return fetch_sub( val ) - val; + } + T operator-=(T val) CDS_NOEXCEPT + { + return fetch_sub( val ) - val; + } + T operator&=(T val) volatile CDS_NOEXCEPT + { + return fetch_and( val ) & val; + } + T operator&=(T val) CDS_NOEXCEPT + { + return fetch_and( val ) & val; + } + T operator|=(T val) volatile CDS_NOEXCEPT + { + return fetch_or( val ) | val; + } + T operator|=(T val) CDS_NOEXCEPT + { + return fetch_or( val ) | val; + } + T operator^=(T val) volatile CDS_NOEXCEPT + { + return fetch_xor( val ) ^ val; + } + T operator^=(T val) CDS_NOEXCEPT + { + return fetch_xor( val ) ^ val; + } + }; + + template + struct select_primary_type { + typedef typename details::primary_type::type type; + }; + template <> + struct select_primary_type { + typedef bool type; + }; + + } // namespace details + + template + struct atomic +#ifndef CDS_CXX11_DELETE_DEFINITION_SUPPORT + : details::atomic_noncopyable +#endif + { + private: + typedef details::atomic_generic_ops::type > atomic_ops; + + T volatile m_data; + public: + bool is_lock_free() const volatile CDS_NOEXCEPT + { + return true; + } + bool is_lock_free() const CDS_NOEXCEPT + { + return true; + } + + void store(T val, memory_order order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + atomic_ops::atomic_store_explicit( &m_data, val, order ); + } + void store(T val, memory_order order = memory_order_seq_cst) CDS_NOEXCEPT + { + 
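Note how every operator of atomic_integral above is expressed through exactly one fetch_xxx RMW: post-increment returns fetch_add(1) (the old value), pre-increment returns fetch_add(1) + 1, and op+= returns fetch_add(val) + val (the new value), so no second atomic access is needed. A short usage sketch of those semantics against the equivalent std::atomic<int> (illustration only, not part of the patch):

    #include <atomic>
    #include <cassert>

    int main()
    {
        std::atomic<int> n( 10 );

        int post = n++;          // fetch_add(1)      -> old value, 10
        int pre  = ++n;          // fetch_add(1) + 1  -> new value, 12
        int sum  = ( n += 5 );   // fetch_add(5) + 5  -> new value, 17

        assert( post == 10 && pre == 12 && sum == 17 );
        return 0;
    }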
atomic_ops::atomic_store_explicit( &m_data, val, order ); + } + + T load(memory_order order = memory_order_seq_cst) const volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_load_explicit( &m_data, order ); + } + T load(memory_order order = memory_order_seq_cst) const CDS_NOEXCEPT + { + return atomic_ops::atomic_load_explicit( &m_data, order ); + } + + operator T() const volatile CDS_NOEXCEPT + { + return load(); + } + operator T() const CDS_NOEXCEPT + { + return load(); + } + + T exchange(T val, memory_order order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_exchange_explicit( &m_data, val, order ); + } + T exchange(T val, memory_order order = memory_order_seq_cst) CDS_NOEXCEPT + { + return atomic_ops::atomic_exchange_explicit( &m_data, val, order ); + } + + bool compare_exchange_weak(T& expected, T desired, memory_order success_order, memory_order failure_order) volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_compare_exchange_weak_explicit( &m_data, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_weak(T& expected, T desired, memory_order success_order, memory_order failure_order) CDS_NOEXCEPT + { + return atomic_ops::atomic_compare_exchange_weak_explicit( &m_data, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_strong(T& expected, T desired, memory_order success_order, memory_order failure_order) volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_compare_exchange_strong_explicit( &m_data, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_strong(T& expected, T desired, memory_order success_order, memory_order failure_order) CDS_NOEXCEPT + { + return atomic_ops::atomic_compare_exchange_strong_explicit( &m_data, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_weak(T& expected, T desired, memory_order success_order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return compare_exchange_weak( expected, desired, success_order, memory_order_relaxed ); + } + bool compare_exchange_weak(T& expected, T desired, memory_order success_order = memory_order_seq_cst) CDS_NOEXCEPT + { + return compare_exchange_weak( expected, desired, success_order, memory_order_relaxed ); + } + bool compare_exchange_strong(T& expected, T desired, memory_order success_order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return compare_exchange_strong( expected, desired, success_order, memory_order_relaxed ); + } + bool compare_exchange_strong(T& expected, T desired, memory_order success_order = memory_order_seq_cst) CDS_NOEXCEPT + { + return compare_exchange_strong( expected, desired, success_order, memory_order_relaxed ); + } + +#ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT + atomic() = default; +#else + atomic() + {} +#endif + CDS_CONSTEXPR atomic(T val) + : m_data( val ) + {} + +#ifdef CDS_CXX11_DELETE_DEFINITION_SUPPORT + atomic(const atomic&) = delete; + atomic& operator=(const atomic&) = delete; + atomic& operator=(const atomic&) volatile = delete; +#endif + + T operator=(T val) volatile CDS_NOEXCEPT + { + store( val ); + return val; + } + T operator=(T val) CDS_NOEXCEPT + { + store( val ); + return val; + } + }; + +#if defined(CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT) && defined(CDS_CXX11_DELETE_DEFINITION_SUPPORT) +# define CDS_DECLARE_ATOMIC_INTEGRAL( _type ) \ + template <> \ + struct atomic<_type>: public details::atomic_integral<_type> \ + { \ + private: \ + typedef details::atomic_integral<_type> base_class ; \ + 
public: \ + atomic() = default; \ + atomic(_type val) CDS_NOEXCEPT : base_class(val) {} \ + atomic(const atomic&) = delete; \ + atomic& operator=(const atomic&) = delete; \ + atomic& operator=(const atomic&) volatile = delete; \ + _type operator=(_type val) volatile CDS_NOEXCEPT { return base_class::operator=(val); } \ + _type operator=(_type val) CDS_NOEXCEPT { return base_class::operator=(val); } \ + }; +#else +# define CDS_DECLARE_ATOMIC_INTEGRAL( _type ) \ + template <> \ + struct atomic<_type>: public details::atomic_integral<_type> \ + { \ + private: \ + typedef details::atomic_integral<_type> base_class ; \ + public: \ + atomic() {} \ + atomic(_type val) CDS_NOEXCEPT : base_class(val) {} \ + _type operator=(_type val) volatile CDS_NOEXCEPT { return base_class::operator=(val); } \ + _type operator=(_type val) CDS_NOEXCEPT { return base_class::operator=(val); } \ + }; +#endif + + CDS_DECLARE_ATOMIC_INTEGRAL(char) + CDS_DECLARE_ATOMIC_INTEGRAL(signed char) + CDS_DECLARE_ATOMIC_INTEGRAL(unsigned char) + CDS_DECLARE_ATOMIC_INTEGRAL(short) + CDS_DECLARE_ATOMIC_INTEGRAL(unsigned short) + CDS_DECLARE_ATOMIC_INTEGRAL(int) + CDS_DECLARE_ATOMIC_INTEGRAL(unsigned int) + CDS_DECLARE_ATOMIC_INTEGRAL(long) + CDS_DECLARE_ATOMIC_INTEGRAL(unsigned long) + CDS_DECLARE_ATOMIC_INTEGRAL(long long) + CDS_DECLARE_ATOMIC_INTEGRAL(unsigned long long) +//#if CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION >= 40400 +// CDS_DECLARE_ATOMIC_INTEGRAL(char16_t) +// CDS_DECLARE_ATOMIC_INTEGRAL(char32_t) +//#endif +// CDS_DECLARE_ATOMIC_INTEGRAL(wchar_t) + +# undef CDS_DECLARE_ATOMIC_INTEGRAL + + + template + class atomic +#ifndef CDS_CXX11_DELETE_DEFINITION_SUPPORT + : details::atomic_noncopyable +#endif + { + private: + T * volatile m_ptr; + typedef details::atomic_pointer atomic_ops; + public: + bool is_lock_free() const volatile CDS_NOEXCEPT + { + return true; + } + bool is_lock_free() const CDS_NOEXCEPT + { + return true; + } + + void store(T * val, memory_order order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + atomic_ops::atomic_store_explicit( &m_ptr, val, order ); + } + void store(T * val, memory_order order = memory_order_seq_cst) CDS_NOEXCEPT + { + atomic_ops::atomic_store_explicit( &m_ptr, val, order ); + } + + T * load(memory_order order = memory_order_seq_cst) const volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_load_explicit( &m_ptr, order ); + } + T * load(memory_order order = memory_order_seq_cst) const CDS_NOEXCEPT + { + return atomic_ops::atomic_load_explicit( &m_ptr, order ); + } + + operator T *() const volatile CDS_NOEXCEPT + { + return load(); + } + operator T *() const CDS_NOEXCEPT + { + return load(); + } + + T * exchange(T * val, memory_order order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_exchange_explicit( &m_ptr, val, order ); + } + T * exchange(T * val, memory_order order = memory_order_seq_cst) CDS_NOEXCEPT + { + return atomic_ops::atomic_exchange_explicit( &m_ptr, val, order ); + } + + bool compare_exchange_weak(T *& expected, T * desired, memory_order success_order, memory_order failure_order) volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_compare_exchange_weak_explicit( &m_ptr, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_weak(T *& expected, T * desired, memory_order success_order, memory_order failure_order) CDS_NOEXCEPT + { + return atomic_ops::atomic_compare_exchange_weak_explicit( &m_ptr, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_strong(T *& 
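The pointer specialization of atomic that begins here (its fetch_add/fetch_sub members follow just below) takes ptrdiff_t offsets and, through details::atomic_pointer, performs the arithmetic in units of T, exactly as std::atomic<T*> does: adding 1 advances the stored pointer by sizeof(T) bytes. A small illustration using the standard equivalent (not part of the patch):

    #include <atomic>
    #include <cassert>

    int main()
    {
        int buf[4] = { 0, 1, 2, 3 };
        std::atomic<int*> p( buf );

        int* old = p.fetch_add( 2 );   // offset is in elements, not bytes
        assert( old == buf );
        assert( p.load() == buf + 2 && *p.load() == 2 );
        return 0;
    }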
expected, T * desired, memory_order success_order, memory_order failure_order) volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_compare_exchange_strong_explicit( &m_ptr, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_strong(T *& expected, T * desired, memory_order success_order, memory_order failure_order) CDS_NOEXCEPT + { + return atomic_ops::atomic_compare_exchange_strong_explicit( &m_ptr, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_weak(T *& expected, T * desired, memory_order success_order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return compare_exchange_weak( expected, desired, success_order, memory_order_relaxed ); + } + bool compare_exchange_weak(T *& expected, T * desired, memory_order success_order = memory_order_seq_cst) CDS_NOEXCEPT + { + return compare_exchange_weak( expected, desired, success_order, memory_order_relaxed ); + } + bool compare_exchange_strong(T *& expected, T * desired, memory_order success_order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return compare_exchange_strong( expected, desired, success_order, memory_order_relaxed ); + } + bool compare_exchange_strong(T *& expected, T * desired, memory_order success_order = memory_order_seq_cst) CDS_NOEXCEPT + { + return compare_exchange_strong( expected, desired, success_order, memory_order_relaxed ); + } + + T * fetch_add(ptrdiff_t offset, memory_order order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_fetch_add_explicit( &m_ptr, offset, order ); + } + T * fetch_add(ptrdiff_t offset, memory_order order = memory_order_seq_cst) CDS_NOEXCEPT + { + return atomic_ops::atomic_fetch_add_explicit( &m_ptr, offset, order ); + } + + T * fetch_sub(ptrdiff_t offset, memory_order order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return atomic_ops::atomic_fetch_sub_explicit( &m_ptr, offset, order ); + } + T * fetch_sub(ptrdiff_t offset, memory_order order = memory_order_seq_cst) CDS_NOEXCEPT + { + return atomic_ops::atomic_fetch_sub_explicit( &m_ptr, offset, order ); + } + +#ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT + atomic() = default; +#else + atomic() CDS_NOEXCEPT + {} +#endif + CDS_CONSTEXPR atomic(T * val) CDS_NOEXCEPT + : m_ptr( val ) + {} + +#ifdef CDS_CXX11_DELETE_DEFINITION_SUPPORT + atomic(const atomic&) = delete; + atomic& operator=(const atomic&) = delete; + atomic& operator=(const atomic&) volatile = delete; +#endif + + T * operator=(T * val) volatile CDS_NOEXCEPT + { + store( val ); + return val; + } + T * operator=(T * val) CDS_NOEXCEPT + { + store( val ); + return val; + } + }; + + // Atomic typedefs + typedef atomic atomic_bool; + typedef atomic atomic_char; + typedef atomic atomic_schar; + typedef atomic atomic_uchar; + typedef atomic atomic_short; + typedef atomic atomic_ushort; + typedef atomic atomic_int; + typedef atomic atomic_uint; + typedef atomic atomic_long; + typedef atomic atomic_ulong; + typedef atomic atomic_llong; + typedef atomic atomic_ullong; +#if ( CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION >= 40400 ) || CDS_COMPILER == CDS_COMPILER_CLANG + typedef atomic atomic_char16_t; + typedef atomic atomic_char32_t; +#endif + typedef atomic atomic_wchar_t; + + + typedef atomic atomic_int_least8_t; + typedef atomic atomic_uint_least8_t; + typedef atomic atomic_int_least16_t; + typedef atomic atomic_uint_least16_t; + typedef atomic atomic_int_least32_t; + typedef atomic atomic_uint_least32_t; + typedef atomic atomic_int_least64_t; + typedef atomic 
atomic_uint_least64_t; + typedef atomic atomic_int_fast8_t; + typedef atomic atomic_uint_fast8_t; + typedef atomic atomic_int_fast16_t; + typedef atomic atomic_uint_fast16_t; + typedef atomic atomic_int_fast32_t; + typedef atomic atomic_uint_fast32_t; + typedef atomic atomic_int_fast64_t; + typedef atomic atomic_uint_fast64_t; + typedef atomic atomic_intptr_t; + typedef atomic atomic_uintptr_t; + typedef atomic atomic_size_t; + typedef atomic atomic_ptrdiff_t; + typedef atomic atomic_intmax_t; + typedef atomic atomic_uintmax_t; + + template + static inline bool atomic_is_lock_free(const volatile atomic * p) CDS_NOEXCEPT + { + return p->is_lock_free(); + } + + template + static inline bool atomic_is_lock_free(const atomic * p ) CDS_NOEXCEPT + { + return p->is_lock_free(); + } + + /* + template + static inline void atomic_init(volatile atomic * p, T val) CDS_NOEXCEPT + { + p->init( val ); + } + + template + static inline void atomic_init( atomic * p, T val) CDS_NOEXCEPT + { + p->init( val ); + } + */ + + template + static inline void atomic_store(volatile atomic* p, T val) CDS_NOEXCEPT + { + p->store(val); + } + template + static inline void atomic_store(atomic* p, T val) CDS_NOEXCEPT + { + p->store( val ); + } + + template + static inline void atomic_store_explicit(volatile atomic* p, T val, memory_order order) CDS_NOEXCEPT + { + p->store( val, order ); + } + template + static inline void atomic_store_explicit(atomic* p, T val, memory_order order) CDS_NOEXCEPT + { + p->store( val, order ); + } + + template + static inline T atomic_load(const volatile atomic* p) CDS_NOEXCEPT + { + return p->load(); + } + template + static inline T atomic_load(const atomic* p) CDS_NOEXCEPT + { + return p->load(); + } + + template + static inline T atomic_load_explicit(const volatile atomic* p, memory_order order) CDS_NOEXCEPT + { + return p->load( order ); + } + template + static inline T atomic_load_explicit(const atomic* p, memory_order order) CDS_NOEXCEPT + { + return p->load( order ); + } + + template + static inline T atomic_exchange(volatile atomic* p, T val) CDS_NOEXCEPT + { + return p->exchange( val ); + } + template + static inline T atomic_exchange(atomic* p, T val ) CDS_NOEXCEPT + { + return p->exchange( val ); + } + + template + static inline T atomic_exchange_explicit(volatile atomic* p, T val, memory_order order) CDS_NOEXCEPT + { + return p->exchange( val, order ); + } + template + static inline T atomic_exchange_explicit(atomic* p, T val, memory_order order) CDS_NOEXCEPT + { + return p->exchange( val, order ); + } + + template + static inline bool atomic_compare_exchange_weak(volatile atomic* p, T* expected, T desired) CDS_NOEXCEPT + { + return p->compare_exchange_weak( *expected, desired ); + } + template + static inline bool atomic_compare_exchange_weak(atomic* p, T* expected, T desired) CDS_NOEXCEPT + { + return p->compare_exchange_weak( *expected, desired ); + } + + template + static inline bool atomic_compare_exchange_strong(volatile atomic* p, T* expected, T desired) CDS_NOEXCEPT + { + return p->compare_exchange_strong( *expected, desired ); + } + template + static inline bool atomic_compare_exchange_strong(atomic* p, T* expected, T desired) CDS_NOEXCEPT + { + return p->compare_exchange_strong( *expected, desired ); + } + + template + static inline bool atomic_compare_exchange_weak_explicit(volatile atomic* p, T* expected, T desired, memory_order success_order, memory_order failure_order) CDS_NOEXCEPT + { + return p->compare_exchange_weak( *expected, desired, success_order, failure_order 
); + } + template + static inline bool atomic_compare_exchange_weak_explicit(atomic* p, T* expected, T desired, memory_order success_order, memory_order failure_order) CDS_NOEXCEPT + { + return p->compare_exchange_weak( *expected, desired, success_order, failure_order ); + } + + template + static inline bool atomic_compare_exchange_strong_explicit(volatile atomic* p, T* expected, T desired, memory_order success_order, memory_order failure_order) CDS_NOEXCEPT + { + return p->compare_exchange_strong( *expected, desired, success_order, failure_order ); + } + template + static inline bool atomic_compare_exchange_strong_explicit(atomic* p, T* expected, T desired, memory_order success_order, memory_order failure_order) CDS_NOEXCEPT + { + return p->compare_exchange_strong( *expected, desired, success_order, failure_order ); + } + + template + static inline T atomic_fetch_add(volatile atomic* p, T val) CDS_NOEXCEPT + { + return p->fetch_add( val ); + } + template + static inline T atomic_fetch_add(atomic* p, T val) CDS_NOEXCEPT + { + return p->fetch_add( val ); + } + template + static inline T * atomic_fetch_add(volatile atomic* p, ptrdiff_t offset) CDS_NOEXCEPT + { + return p->fetch_add( offset ); + } + template + static inline T * atomic_fetch_add(atomic* p, ptrdiff_t offset) CDS_NOEXCEPT + { + return p->fetch_add( offset ); + } + + template + static inline T atomic_fetch_add_explicit(volatile atomic* p, T val, memory_order order) CDS_NOEXCEPT + { + return p->fetch_add( val, order ); + } + template + static inline T atomic_fetch_add_explicit(atomic* p, T val, memory_order order) CDS_NOEXCEPT + { + return p->fetch_add( val, order ); + } + template + static inline T * atomic_fetch_add_explicit(volatile atomic* p, ptrdiff_t offset, memory_order order) CDS_NOEXCEPT + { + return p->fetch_add( offset, order ); + } + template + static inline T * atomic_fetch_add_explicit(atomic* p, ptrdiff_t offset, memory_order order) CDS_NOEXCEPT + { + return p->fetch_add( offset, order ); + } + + template + static inline T atomic_fetch_sub(volatile atomic* p, T val) CDS_NOEXCEPT + { + return p->fetch_sub( val ); + } + template + static inline T atomic_fetch_sub(atomic* p, T val) CDS_NOEXCEPT + { + return p->fetch_sub( val ); + } + template + static inline T * atomic_fetch_sub(volatile atomic* p, ptrdiff_t offset) CDS_NOEXCEPT + { + return p->fetch_sub( offset ); + } + template + static inline T * atomic_fetch_sub(atomic* p, ptrdiff_t offset) CDS_NOEXCEPT + { + return p->fetch_sub( offset ); + } + + template + static inline T atomic_fetch_sub_explicit(volatile atomic* p, T val, memory_order order) CDS_NOEXCEPT + { + return p->fetch_sub( val, order ); + } + template + static inline T atomic_fetch_sub_explicit(atomic* p, T val, memory_order order) CDS_NOEXCEPT + { + return p->fetch_sub( val, order ); + } + template + static inline T * atomic_fetch_sub_explicit(volatile atomic* p, ptrdiff_t offset, memory_order order) CDS_NOEXCEPT + { + return p->fetch_sub( offset, order ); + } + template + static inline T * atomic_fetch_sub_explicit(atomic* p, ptrdiff_t offset, memory_order order) CDS_NOEXCEPT + { + return p->fetch_sub( offset, order ); + } + + template + static inline T atomic_fetch_and(volatile atomic* p, T val) CDS_NOEXCEPT + { + return p->fetch_and( val ); + } + template + static inline T atomic_fetch_and(atomic* p, T val) CDS_NOEXCEPT + { + return p->fetch_and( val ); + } + + template + static inline T atomic_fetch_and_explicit(volatile atomic* p, T val, memory_order order) CDS_NOEXCEPT + { + return p->fetch_and( 
val, order ); + } + template + static inline T atomic_fetch_and_explicit(atomic* p, T val, memory_order order) CDS_NOEXCEPT + { + return p->fetch_and( val, order ); + } + + template + static inline T atomic_fetch_or(volatile atomic* p, T val) CDS_NOEXCEPT + { + return p->fetch_or( val ); + } + template + static inline T atomic_fetch_or(atomic* p, T val) CDS_NOEXCEPT + { + return p->fetch_or( val ); + } + + template + static inline T atomic_fetch_or_explicit(volatile atomic* p, T val, memory_order order) CDS_NOEXCEPT + { + return p->fetch_or( val, order ); + } + template + static inline T atomic_fetch_or_explicit(atomic* p, T val, memory_order order) CDS_NOEXCEPT + { + return p->fetch_or( val, order ); + } + + template + static inline T atomic_fetch_xor(volatile atomic* p, T val) CDS_NOEXCEPT + { + return p->fetch_xor( val ); + } + template + static inline T atomic_fetch_xor(atomic* p, T val) CDS_NOEXCEPT + { + return p->fetch_xor( val ); + } + + template + static inline T atomic_fetch_xor_explicit(volatile atomic* p, T val, memory_order order) CDS_NOEXCEPT + { + return p->fetch_xor( val, order ); + } + template + static inline T atomic_fetch_xor_explicit(atomic* p, T val, memory_order order) CDS_NOEXCEPT + { + return p->fetch_xor( val, order ); + } + + // Atomic flag type + typedef struct atomic_flag + { + void clear( memory_order order = memory_order_seq_cst ) volatile CDS_NOEXCEPT + { + assert( order != memory_order_acquire + && order != memory_order_acq_rel + && order != memory_order_consume + ); + platform::atomic_flag_clear( &m_Flag, order ); + } + void clear( memory_order order = memory_order_seq_cst ) CDS_NOEXCEPT + { + assert( order != memory_order_acquire + && order != memory_order_acq_rel + && order != memory_order_consume + ); + platform::atomic_flag_clear( &m_Flag, order ); + } + + bool test_and_set( memory_order order = memory_order_seq_cst ) volatile CDS_NOEXCEPT + { + return platform::atomic_flag_tas( &m_Flag, order ); + } + bool test_and_set( memory_order order = memory_order_seq_cst ) CDS_NOEXCEPT + { + return platform::atomic_flag_tas( &m_Flag, order ); + } + +#ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT + atomic_flag() = default; +#elif CDS_COMPILER != CDS_COMPILER_MSVC + // MS VC generate error C2552 "non-aggregates cannot be initialized with initializer list" + // when atomic_flag initializes with ATOMIC_FLAG_INIT + atomic_flag() + {} +#endif + +#ifdef CDS_CXX11_DELETE_DEFINITION_SUPPORT + atomic_flag(const atomic_flag&) = delete; + atomic_flag& operator=(const atomic_flag&) = delete; + atomic_flag& operator=(const atomic_flag&) volatile = delete; +#elif CDS_COMPILER != CDS_COMPILER_MSVC + // MS VC generate error C2552 "non-aggregates cannot be initialized with initializer list" + // when atomic_flag initializes with ATOMIC_FLAG_INIT + private: + atomic_flag(const atomic_flag&); + atomic_flag& operator=(const atomic_flag&); + atomic_flag& operator=(const atomic_flag&) volatile; + public: +#endif + + platform::atomic_flag_type volatile m_Flag; + } atomic_flag; + + static inline bool atomic_flag_test_and_set(volatile atomic_flag* p) CDS_NOEXCEPT + { + return p->test_and_set(); + } + static inline bool atomic_flag_test_and_set(atomic_flag * p) CDS_NOEXCEPT + { + return p->test_and_set(); + } + static inline bool atomic_flag_test_and_set_explicit(volatile atomic_flag* p, memory_order order) CDS_NOEXCEPT + { + return p->test_and_set( order ); + } + static inline bool atomic_flag_test_and_set_explicit(atomic_flag* p, memory_order order) CDS_NOEXCEPT + { + return 
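atomic_flag above exposes only test_and_set and clear, which is exactly the interface needed for a test-and-set spin lock; the assertions in clear() reject acquire-style orders because a flag clear is a pure store. A minimal sketch of such a lock over the standard std::atomic_flag (the spin_lock class is hypothetical, illustration only):

    #include <atomic>

    // Test-and-set spin lock: acquire by setting the flag, release by clearing it.
    class spin_lock {
        std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
    public:
        void lock()
        {
            while ( m_flag.test_and_set( std::memory_order_acquire ))
                ;   // spin until the previous owner clears the flag
        }
        void unlock()
        {
            m_flag.clear( std::memory_order_release );
        }
    };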
p->test_and_set( order );
+    }
+    static inline void atomic_flag_clear(volatile atomic_flag* p) CDS_NOEXCEPT
+    {
+        return p->clear();
+    }
+    static inline void atomic_flag_clear(atomic_flag* p) CDS_NOEXCEPT
+    {
+        return p->clear();
+    }
+    static inline void atomic_flag_clear_explicit(volatile atomic_flag* p, memory_order order) CDS_NOEXCEPT
+    {
+        return p->clear( order );
+    }
+    static inline void atomic_flag_clear_explicit(atomic_flag* p, memory_order order) CDS_NOEXCEPT
+    {
+        return p->clear( order );
+    }
+
+    // Fences
+    static inline void atomic_thread_fence(memory_order order) CDS_NOEXCEPT
+    {
+        platform::thread_fence( order );
+        CDS_COMPILER_RW_BARRIER;
+    }
+    static inline void atomic_signal_fence(memory_order order) CDS_NOEXCEPT
+    {
+        platform::signal_fence( order );
+    }
+
+}} // namespace cds::cxx11_atomics
+
+//@endcond
+#endif // #ifndef __CDS_COMPILER_CXX11_ATOMIC_H
diff --git a/cds/compiler/cxx11_atomic_patches.h b/cds/compiler/cxx11_atomic_patches.h
new file mode 100644
index 00000000..56db30cb
--- /dev/null
+++ b/cds/compiler/cxx11_atomic_patches.h
@@ -0,0 +1,13 @@
+//$$CDS-header$$
+
+#ifndef __CDS_COMPILER_CXX11_ATOMIC_PATCHES_H
+#define __CDS_COMPILER_CXX11_ATOMIC_PATCHES_H
+
+#if CDS_COMPILER == CDS_COMPILER_GCC
+# include <cds/compiler/gcc/cxx11_atomic_patches.h>
+#elif CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS
+# include <cds/compiler/icl/cxx11_atomic_patches_win.h>
+#endif
+
+
+#endif // #ifndef __CDS_COMPILER_CXX11_ATOMIC_PATCHES_H
diff --git a/cds/compiler/cxx11_atomic_prepatches.h b/cds/compiler/cxx11_atomic_prepatches.h
new file mode 100644
index 00000000..3729a079
--- /dev/null
+++ b/cds/compiler/cxx11_atomic_prepatches.h
@@ -0,0 +1,10 @@
+//$$CDS-header$$
+
+#ifndef __CDS_COMPILER_CXX11_ATOMIC_PREPATCHES_H
+#define __CDS_COMPILER_CXX11_ATOMIC_PREPATCHES_H
+
+#if CDS_COMPILER == CDS_COMPILER_CLANG
+# include <cds/compiler/clang/cxx11_atomic_prepatches.h>
+#endif
+
+#endif // #ifndef __CDS_COMPILER_CXX11_ATOMIC_PREPATCHES_H
diff --git a/cds/compiler/defs.h b/cds/compiler/defs.h
new file mode 100644
index 00000000..059cc4a1
--- /dev/null
+++ b/cds/compiler/defs.h
@@ -0,0 +1,28 @@
+//$$CDS-header$$
+
+#ifndef __CDS_ARH_COMPILER_DEFS_H
+#define __CDS_ARH_COMPILER_DEFS_H
+
+#if CDS_COMPILER == CDS_COMPILER_MSVC
+# include <cds/compiler/vc/defs.h>
+#elif CDS_COMPILER == CDS_COMPILER_GCC
+# include <cds/compiler/gcc/defs.h>
+#elif CDS_COMPILER == CDS_COMPILER_INTEL
+# include <cds/compiler/icl/defs.h>
+#elif CDS_COMPILER == CDS_COMPILER_CLANG
+# include <cds/compiler/clang/defs.h>
+#elif CDS_COMPILER == CDS_COMPILER_UNKNOWN
+# error Unknown compiler.
Compilation aborted +#else +# error Unknown value of CDS_COMPILER macro +#endif + +#ifndef CDS_STDCALL +# define CDS_STDCALL +#endif + +#ifndef CDS_EXPORT_API +# define CDS_EXPORT_API +#endif + +#endif // #ifndef __CDS_ARH_COMPILER_DEFS_H diff --git a/cds/compiler/gcc/amd64/backoff.h b/cds/compiler/gcc/amd64/backoff.h new file mode 100644 index 00000000..4023f1f9 --- /dev/null +++ b/cds/compiler/gcc/amd64/backoff.h @@ -0,0 +1,39 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_AMD64_BACKOFF_H +#define __CDS_COMPILER_GCC_AMD64_BACKOFF_H + +//@cond none + +namespace cds { namespace backoff { + namespace gcc { namespace amd64 { + +# define CDS_backoff_pause_defined + static inline void backoff_pause( unsigned int nLoop = 0x000003FF ) + { + asm volatile ( + "andl %[nLoop], %%ecx; \n\t" + "cmovzl %[nLoop], %%ecx; \n\t" + "rep; " + "nop; \n\t" + : /*no output*/ + : [nLoop] "r" (nLoop) + : "ecx", "cc" + ); + } + +# define CDS_backoff_nop_defined + static inline void backoff_nop() + { + asm volatile ( "nop;" ); + } + + }} // namespace gcc::amd64 + + namespace platform { + using namespace gcc::amd64; + } +}} // namespace cds::backoff + +//@endcond +#endif // #ifndef __CDS_COMPILER_GCC_AMD64_BACKOFF_H diff --git a/cds/compiler/gcc/amd64/bitop.h b/cds/compiler/gcc/amd64/bitop.h new file mode 100644 index 00000000..b83384cf --- /dev/null +++ b/cds/compiler/gcc/amd64/bitop.h @@ -0,0 +1,157 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_AMD64_BITOP_H +#define __CDS_COMPILER_GCC_AMD64_BITOP_H + +//@cond none +namespace cds { + namespace bitop { namespace platform { namespace gcc { namespace amd64 { + // MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0 +# define cds_bitop_msb32_DEFINED + static inline int msb32( atomic32u_t nArg ) + { + int nRet; + __asm__ __volatile__ ( + "bsrl %[nArg], %[nRet] ;\n\t" + "jnz 1f ;\n\t" + "xorl %[nRet], %[nRet] ;\n\t" + "subl $1, %[nRet] ;\n\t" + "1:" + "addl $1, %[nRet] ;\n\t" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return nRet; + } + +# define cds_bitop_msb32nz_DEFINED + static inline int msb32nz( atomic32u_t nArg ) + { + assert( nArg != 0 ); + int nRet; + __asm__ __volatile__ ( + "bsrl %[nArg], %[nRet] ;" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return nRet; + } + + // LSB - return index (0..31) of least significant bit in nArg. If nArg == 0 return -1U +# define cds_bitop_lsb32_DEFINED + static inline int lsb32( atomic32u_t nArg ) + { + + int nRet; + __asm__ __volatile__ ( + "bsfl %[nArg], %[nRet] ;" + "jnz 1f ;" + "xorl %[nRet], %[nRet] ;" + "subl $1, %[nRet] ;" + "1:" + "addl $1, %[nRet] ;" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return nRet; + + } + + // LSB - return index (0..31) of least significant bit in nArg. 
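A note on the convention used by the BSR/BSF helpers above: as the inline asm reads, msb32 (and, despite the 0..31 / -1U wording of its comment, lsb32 as well) yields the 1-based position of the relevant set bit and returns 0 for a zero argument; that is what the trailing "addl $1" after the zero-check achieves. A portable sketch of that convention (msb32_portable is a hypothetical name, illustration only, not part of the patch):

    #include <cstdint>

    // Portable equivalent of the msb32 convention implemented above:
    // 1-based index of the most significant set bit, 0 for a zero argument.
    inline int msb32_portable( std::uint32_t x )
    {
        int pos = 0;
        while ( x ) {
            ++pos;
            x >>= 1;
        }
        return pos;
    }

    // msb32_portable(0) == 0, msb32_portable(1) == 1, msb32_portable(0x80000000u) == 32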
+ // Condition: nArg != 0 +# define cds_bitop_lsb32nz_DEFINED + static inline int lsb32nz( atomic32u_t nArg ) + { + assert( nArg != 0 ); + int nRet; + __asm__ __volatile__ ( + "bsfl %[nArg], %[nRet] ;" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return nRet; + } + +# define cds_bitop_msb64_DEFINED + static inline int msb64( atomic64u_unaligned nArg ) + { + atomic64u_unaligned nRet; + asm volatile ( + "bsrq %[nArg], %[nRet] ;\n\t" + "jnz 1f ;\n\t" + "xorq %[nRet], %[nRet] ;\n\t" + "subq $1, %[nRet] ;\n\t" + "1:" + "addq $1, %[nRet] ;\n\t" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return (int) nRet; + } + +# define cds_bitop_msb64nz_DEFINED + static inline int msb64nz( atomic64u_unaligned nArg ) + { + assert( nArg != 0 ); + atomic64u_unaligned nRet; + __asm__ __volatile__ ( + "bsrq %[nArg], %[nRet] ;" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return (int) nRet; + } + + // LSB - return index (0..31) of least significant bit in nArg. If nArg == 0 return -1U +# define cds_bitop_lsb64_DEFINED + static inline int lsb64( atomic64u_unaligned nArg ) + { + + atomic64u_unaligned nRet; + __asm__ __volatile__ ( + "bsfq %[nArg], %[nRet] ;" + "jnz 1f ;" + "xorq %[nRet], %[nRet] ;" + "subq $1, %[nRet] ;" + "1:" + "addq $1, %[nRet] ;" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return (int) nRet; + + } + + // LSB - return index (0..31) of least significant bit in nArg. + // Condition: nArg != 0 +# define cds_bitop_lsb64nz_DEFINED + static inline int lsb64nz( atomic64u_unaligned nArg ) + { + assert( nArg != 0 ); + atomic64u_unaligned nRet; + __asm__ __volatile__ ( + "bsfq %[nArg], %[nRet] ;" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return (int) nRet; + } + + + }} // namespace gcc::amd64 + + using namespace gcc::amd64; + + }}} // namespace cds::bitop::platform + +//@endcond + +#endif // #ifndef __CDS_COMPILER_GCC_AMD64_BITOP_H diff --git a/cds/compiler/gcc/amd64/cxx11_atomic.h b/cds/compiler/gcc/amd64/cxx11_atomic.h new file mode 100644 index 00000000..afb9a749 --- /dev/null +++ b/cds/compiler/gcc/amd64/cxx11_atomic.h @@ -0,0 +1,207 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_AMD64_CXX11_ATOMIC_H +#define __CDS_COMPILER_GCC_AMD64_CXX11_ATOMIC_H + +#include +#include + +//@cond +namespace cds { namespace cxx11_atomics { + namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace amd64 { +# ifndef CDS_CXX11_INLINE_NAMESPACE_SUPPORT + // primitives up to 32bit + fences + using namespace cds::cxx11_atomics::platform::gcc::x86; +# endif + + //----------------------------------------------------------------------------- + // 64bit primitives + //----------------------------------------------------------------------------- + + template + static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 8 )); + + T prev = expected; + fence_before(mo_success); + __asm__ __volatile__ ( + "lock ; cmpxchgq %[desired], %[pDest]" + : [prev] "+a" (prev), [pDest] "+m" (*pDest) + : [desired] "r" (desired) + ); + bool success = (prev == expected); + expected = prev; + if (success) + fence_after(mo_success); + else + fence_after(mo_fail); + return success; + } + + template + static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + 
{ + return cas64_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T load64( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + assert( cds::details::is_aligned( pSrc, 8 )); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + + + template + static inline T exchange64( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 8 )); + + fence_before(order); + __asm__ __volatile__ ( + "xchgq %[v], %[pDest]" + : [v] "+r" (v), [pDest] "+m" (*pDest) + ); + fence_after(order); + return v; + } + + template + static inline void store64( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + assert( cds::details::is_aligned( pDest, 8 )); + + if (order != memory_order_seq_cst) { + fence_before(order); + *pDest = val; + } + else { + exchange64( pDest, val, order); + } + } + +# define CDS_ATOMIC_fetch64_add_defined + template + static inline T fetch64_add( T volatile * pDest, T v, memory_order order) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 8 )); + + fence_before(order); + __asm__ __volatile__ ( + "lock ; xaddq %[v], %[pDest]" + : [v] "+r" (v), [pDest] "+m" (*pDest) + ); + fence_after(order); + return v; + } + +# define CDS_ATOMIC_fetch64_sub_defined + template + static inline T fetch64_sub( T volatile * pDest, T v, memory_order order) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 8 )); + + fence_before(order); + __asm__ __volatile__ ( + "negq %[v] ; \n" + "lock ; xaddq %[v], %[pDest]" + : [v] "+r" (v), [pDest] "+m" (*pDest) + ); + fence_after(order); + return v; + } + + + //----------------------------------------------------------------------------- + // pointer primitives + //----------------------------------------------------------------------------- + + template + static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + + return (T *) exchange64( (uint64_t volatile *) pDest, (uint64_t) v, order ); + } + + template + static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange_ptr( pDest, src, order ); + } + } + + template + static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( 
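store64 above issues a plain store (preceded by fence_before) for relaxed and release orders but routes memory_order_seq_cst through exchange64, i.e. a locked XCHG: on x86-64 a plain MOV is sufficient for a release store, while a sequentially consistent store needs the full barrier the locked instruction provides. The same dispatch expressed over std::atomic (store64_sketch is a hypothetical name, illustration only, not part of the patch):

    #include <atomic>
    #include <cstdint>

    // Mirror of the dispatch in store64 above: weaker-than-seq_cst stores
    // stay plain stores, a seq_cst store is performed as an exchange so it
    // carries a full barrier.
    inline void store64_sketch( std::atomic<std::uint64_t>& dest, std::uint64_t val,
                                std::memory_order order )
    {
        if ( order != std::memory_order_seq_cst )
            dest.store( val, order );
        else
            dest.exchange( val, std::memory_order_seq_cst );
    }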
pSrc != NULL ); + + T * v = *pSrc; + fence_after_load( order ); + return v; + } + + template + static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + + return cas64_strong( (uint64_t volatile *) pDest, *reinterpret_cast( &expected ), (uint64_t) desired, mo_success, mo_fail ); + } + + template + static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + }} // namespace gcc::amd64 + +#ifndef CDS_CXX11_INLINE_NAMESPACE_SUPPORT + using namespace gcc::amd64; +#endif + } // namespace platform + +}} // namespace cds::cxx11_atomics +//@endcond + +#endif // #ifndef __CDS_COMPILER_GCC_AMD64_CXX11_ATOMIC_H diff --git a/cds/compiler/gcc/compiler_barriers.h b/cds/compiler/gcc/compiler_barriers.h new file mode 100644 index 00000000..eb41faa9 --- /dev/null +++ b/cds/compiler/gcc/compiler_barriers.h @@ -0,0 +1,10 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_COMPILER_BARRIERS_H +#define __CDS_COMPILER_GCC_COMPILER_BARRIERS_H + +#define CDS_COMPILER_RW_BARRIER __asm__ __volatile__ ( "" ::: "memory" ) +#define CDS_COMPILER_R_BARRIER CDS_COMPILER_RW_BARRIER +#define CDS_COMPILER_W_BARRIER CDS_COMPILER_RW_BARRIER + +#endif // #ifndef __CDS_COMPILER_GCC_COMPILER_BARRIERS_H diff --git a/cds/compiler/gcc/compiler_macro.h b/cds/compiler/gcc/compiler_macro.h new file mode 100644 index 00000000..c9e89bcb --- /dev/null +++ b/cds/compiler/gcc/compiler_macro.h @@ -0,0 +1,136 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_COMPILER_MACRO_H +#define __CDS_COMPILER_GCC_COMPILER_MACRO_H + +// OS interface && OS name +#if defined( __linux__ ) +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_LINUX +# define CDS_OS__NAME "linux" +# define CDS_OS__NICK "linux" +#elif defined( __sun__ ) +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_SUN_SOLARIS +# define CDS_OS__NAME "Sun Solaris" +# define CDS_OS__NICK "sun" +#elif defined( __hpux__ ) +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_HPUX +# define CDS_OS__NAME "HP-UX" +# define CDS_OS__NICK "hpux" +#elif defined( _AIX ) +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_AIX +# define CDS_OS__NAME "AIX" +# define CDS_OS__NICK "aix" +#elif defined( __FreeBSD__ ) +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_FREE_BSD +# define CDS_OS__NAME "FreeBSD" +# define CDS_OS__NICK "freebsd" +#elif defined( __OpenBSD__ ) +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_OPEN_BSD +# define CDS_OS__NAME "OpenBSD" +# define CDS_OS__NICK "openbsd" +#elif defined( __NetBSD__ ) +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_NET_BSD +# define CDS_OS__NAME "NetBSD" +# define CDS_OS__NICK "netbsd" +#elif defined(__MINGW32__) || defined( __MINGW64__) +# define CDS_OS_INTERFACE CDS_OSI_WINDOWS +# define CDS_OS_TYPE CDS_OS_MINGW +# define CDS_OS__NAME "MinGW" +# define CDS_OS__NICK "mingw" +#elif defined(__MACH__) +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_OSX +# define CDS_OS__NAME "OS X" +# define CDS_OS__NICK "osx" +#else +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_PTHREAD +# define CDS_OS__NAME "pthread" +# define CDS_OS__NICK "pthread" +#endif + +// 
Processor architecture + +#if defined(__arm__) && !defined(__ARM_ARCH) + // GCC 4.6 does not defined __ARM_ARCH +# if defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) +# define __ARM_ARCH 7 +# else +# define __ARM_ARCH 5 +# endif +#endif + +#if defined(__x86_64__) || defined(__amd64__) || defined(__amd64) +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_AMD64 +# define CDS_BUILD_BITS 64 +# define CDS_PROCESSOR__NAME "Intel x86-64" +# define CDS_PROCESSOR__NICK "amd64" +#elif defined(__i386__) +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_X86 +# define CDS_BUILD_BITS 32 +# define CDS_PROCESSOR__NAME "Intel x86" +# define CDS_PROCESSOR__NICK "x86" +#elif defined(sparc) || defined (__sparc__) +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_SPARC +# define CDS_PROCESSOR__NAME "Sparc" +# define CDS_PROCESSOR__NICK "sparc" +# ifdef __arch64__ +# define CDS_BUILD_BITS 64 +# else +# error Sparc 32bit is not supported +# endif +#elif defined( __ia64__) +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_IA64 +# define CDS_BUILD_BITS 64 +# define CDS_PROCESSOR__NAME "Intel IA64" +# define CDS_PROCESSOR__NICK "ia64" +#elif defined(_ARCH_PPC64) +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_PPC64 +# define CDS_BUILD_BITS 64 +# define CDS_PROCESSOR__NAME "IBM PowerPC64" +# define CDS_PROCESSOR__NICK "ppc64" +#elif defined(__arm__) && __SIZEOF_POINTER__ == 4 && __ARM_ARCH >= 7 +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_ARM7 +# define CDS_BUILD_BITS 32 +# define CDS_PROCESSOR__NAME "ARM v7" +# define CDS_PROCESSOR__NICK "arm7" +#else +# if defined(CDS_USE_LIBCDS_ATOMIC) +# error "Libcds does not support atomic implementation for the processor architecture. Try to use C++11-compatible compiler and remove CDS_USE_LIBCDS_ATOMIC flag from compiler command line" +# elif !defined(CDS_USE_BOOST_ATOMIC) && !defined(CDS_CXX11_ATOMIC_SUPPORT) +# error "The current compiler does not support C++11 atomic. 
Try to use boost.atomic (-DCDS_USE_BOOST_ATOMIC) or C++11 compiler" +# else +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_UNKNOWN +# define CDS_BUILD_BITS 32 +# define CDS_PROCESSOR__NAME "unknown" +# define CDS_PROCESSOR__NICK "unknown" +# endif +#endif + +#if CDS_OS_TYPE == CDS_OS_MINGW +# ifdef CDS_BUILD_LIB +# define CDS_EXPORT_API __declspec(dllexport) +# else +# define CDS_EXPORT_API __declspec(dllimport) +# endif +#else +# ifndef __declspec +# define __declspec( _x ) +# endif +#endif + +#if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 +# define CDS_STDCALL __attribute__((stdcall)) +#else +# define CDS_STDCALL +#endif + + +#endif // #ifndef __CDS_COMPILER_GCC_COMPILER_MACRO_H diff --git a/cds/compiler/gcc/cxx11_atomic_patches.h b/cds/compiler/gcc/cxx11_atomic_patches.h new file mode 100644 index 00000000..c6818878 --- /dev/null +++ b/cds/compiler/gcc/cxx11_atomic_patches.h @@ -0,0 +1,47 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_CXX11_ATOMIC_PATCHES_H +#define __CDS_COMPILER_GCC_CXX11_ATOMIC_PATCHES_H + +#if CDS_CXX11_ATOMIC_SUPPORT == 1 +# if CDS_COMPILER_VERSION >= 40600 && CDS_COMPILER_VERSION < 40700 + // GCC 4.6.x has no atomic_thread_fence/atomic_signal_fence + namespace std { + inline void atomic_thread_fence(memory_order order) + { + switch(order) + { + case memory_order_relaxed: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + __sync_synchronize(); + break; + default:; + } + + } + inline void atomic_signal_fence(memory_order order) + { + switch(order) + { + case memory_order_relaxed: + break; + case memory_order_consume: + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + default:; + } + } + } // namespace std +# endif +#endif + +#endif // #ifndef __CDS_COMPILER_GCC_CXX11_ATOMIC_PATCHES_H diff --git a/cds/compiler/gcc/defs.h b/cds/compiler/gcc/defs.h new file mode 100644 index 00000000..803945ec --- /dev/null +++ b/cds/compiler/gcc/defs.h @@ -0,0 +1,143 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_DEFS_H +#define __CDS_COMPILER_GCC_DEFS_H + +// Compiler version +#define CDS_COMPILER_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) + +#if CDS_COMPILER_VERSION < 40300 +# error "Compiler version error. 
GCC version 4.3.0 and above is supported" +#endif + +// Compiler name +#ifdef __VERSION__ +# define CDS_COMPILER__NAME ("GNU C++ " __VERSION__) +#else +# define CDS_COMPILER__NAME "GNU C++" +#endif +#define CDS_COMPILER__NICK "gcc" + +// C++11 atomic support +#if CDS_COMPILER_VERSION >= 40700 +# define CDS_CXX11_ATOMIC_SUPPORT 1 +#elif defined(CDS_CXX11_ATOMIC_GCC) +# define CDS_CXX11_ATOMIC_SUPPORT 1 +#endif + + +#include + + +#define alignof __alignof__ + +// *************************************** +// C++11 features + +// Variadic template support (only if -std=c++0x compile-line option provided) +#if CDS_COMPILER_VERSION >= 40300 +# define CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT +#endif + +// RValue +#if CDS_COMPILER_VERSION >= 40300 +# define CDS_RVALUE_SUPPORT +# define CDS_MOVE_SEMANTICS_SUPPORT +#endif + +// Default template arguments for function templates +#if CDS_COMPILER_VERSION >= 40300 +# define CDS_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS_SUPPORT +#endif + +// C++11 inline namespace +#if CDS_COMPILER_VERSION >= 40400 +# define CDS_CXX11_INLINE_NAMESPACE_SUPPORT +#endif + +// Delete definition and explicitly-defaulted function +#if CDS_COMPILER_VERSION >= 40400 +// C++11 delete definition ( function declaration = delete) +# define CDS_CXX11_DELETE_DEFINITION_SUPPORT + +// C++11 explicitly-defaulted function (= default) [std 8.4.2 [dcl.fct.def.default]] +# define CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT +#endif + +// Lambda +#if CDS_COMPILER_VERSION >= 40500 +# define CDS_CXX11_LAMBDA_SUPPORT +# if CDS_COMPILER_VERSION < 40800 +# define CDS_BUG_STATIC_MEMBER_IN_LAMBDA +# endif +#endif + +// Explicit conversion operator +#if CDS_COMPILER_VERSION >= 40500 +# define CDS_CXX11_EXPLICIT_CONVERSION_OPERATOR_SUPPORT +#endif + +#if CDS_COMPILER_VERSION >= 40600 +# define CDS_CONSTEXPR constexpr +# define CDS_NOEXCEPT_SUPPORT noexcept +# define CDS_NOEXCEPT_SUPPORT_(expr) noexcept(expr) +# if CDS_COMPILER_VERSION >= 40600 && CDS_COMPILER_VERSION < 40700 + // GCC 4.6.x does not allow noexcept specification in defaulted function + // void foo() noexcept = default + // error: function ‘foo’ defaulted on its first declaration must not have an exception-specification +# define CDS_NOEXCEPT_DEFAULTED +# define CDS_NOEXCEPT_DEFAULTED_(expr) + // GCC 4.6.x: constexpr and const are incompatible in variable declaration +# define CDS_CONSTEXPR_CONST const +# else +# define CDS_CONSTEXPR_CONST constexpr const +# endif +#else +# define CDS_CONSTEXPR +# define CDS_CONSTEXPR_CONST const +# define CDS_NOEXCEPT_SUPPORT +# define CDS_NOEXCEPT_SUPPORT_(expr) +#endif + +// C++11 template alias +#if CDS_COMPILER_VERSION >= 40700 +# define CDS_CXX11_TEMPLATE_ALIAS_SUPPORT +#endif + +// C++11 thread_local keyword +#if CDS_COMPILER_VERSION >= 40800 +# define CDS_CXX11_THREAD_LOCAL_SUPPORT +#endif + + +#if defined( CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT ) && defined(CDS_MOVE_SEMANTICS_SUPPORT) && CDS_COMPILER_VERSION >= 40400 && CDS_COMPILER_VERSION < 40600 + // GCC 4.4 - 4.5 bug: move ctor & assignment operator cannot be defaulted +# define CDS_DISABLE_DEFAULT_MOVE_CTOR +#endif + +#include + +// Thread support library (thread, mutex, condition variable, chrono) +#if CDS_COMPILER_VERSION >= 40800 +# define CDS_CXX11_STDLIB_THREAD +# define CDS_CXX11_STDLIB_MUTEX +# define CDS_CXX11_STDLIB_CONDITION_VARIABLE +# define CDS_CXX11_STDLIB_CHRONO +#endif + +// Full SFINAE support +#if CDS_COMPILER_VERSION >= 40700 +# define CDS_CXX11_SFINAE +#endif + +// ************************************************* +// 
Alignment macro + +#define CDS_TYPE_ALIGNMENT(n) __attribute__ ((aligned (n))) +#define CDS_CLASS_ALIGNMENT(n) __attribute__ ((aligned (n))) +#define CDS_DATA_ALIGNMENT(n) __attribute__ ((aligned (n))) + + +#include + +#endif // #ifndef __CDS_COMPILER_GCC_DEFS_H diff --git a/cds/compiler/gcc/ia64/backoff.h b/cds/compiler/gcc/ia64/backoff.h new file mode 100644 index 00000000..84d718c1 --- /dev/null +++ b/cds/compiler/gcc/ia64/backoff.h @@ -0,0 +1,37 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_IA64_BACKOFF_H +#define __CDS_COMPILER_GCC_IA64_BACKOFF_H + +//@cond none + +namespace cds { namespace backoff { + namespace gcc { namespace ia64 { + +# define CDS_backoff_pause_defined + static inline void backoff_pause( unsigned int nLoop = 0x000003FF ) + { + asm volatile ( "hint @pause" ); + } + +# define CDS_backoff_hint_defined + static inline void backoff_hint() + { + asm volatile ( "hint @pause;;" ); + } + +# define CDS_backoff_nop_defined + static inline void backoff_nop() + { + asm volatile ( "nop;;" ); + } + + }} // namespace gcc::ia64 + + namespace platform { + using namespace gcc::ia64; + } +}} // namespace cds::backoff + +//@endcond +#endif // #ifndef __CDS_COMPILER_GCC_IA64_BACKOFF_H diff --git a/cds/compiler/gcc/ia64/bitop.h b/cds/compiler/gcc/ia64/bitop.h new file mode 100644 index 00000000..64e21e1c --- /dev/null +++ b/cds/compiler/gcc/ia64/bitop.h @@ -0,0 +1,62 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_IA64_BITOP_H +#define __CDS_COMPILER_GCC_IA64_BITOP_H + +//@cond none +namespace cds { + namespace bitop { namespace platform { namespace gcc { namespace ia64 { + + // MSB - return index (1..32) of most significant bit in x. If x == 0 return 0 +# define cds_bitop_msb32_DEFINED + static inline int msb32( atomic32u_t nArg ) + { + if ( !nArg ) + return 0; + atomic64u_t x = nArg; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + + atomic64u_t nRes; + asm __volatile__( "popcnt %0=%1\n\t" : "=r" (nRes) : "r" (x) ); + return (int) nRes; + } + + // It is not compiled on HP-UX. Why?.. +#if CDS_OS_TYPE != CDS_OS_HPUX + // MSB - return index (0..31) of most significant bit in nArg. + // !!! nArg != 0 +# define cds_bitop_msb32nz_DEFINED + static inline int msb32nz( atomic32u_t nArg ) + { + assert( nArg != 0 ); + long double d = nArg; + long nExp; + asm __volatile__("getf.exp %0=%1\n\t" : "=r"(nExp) : "f"(d)); + return (int) (nExp - 0xffff); + } + + // MSB - return index (0..63) of most significant bit in nArg. + // !!! nArg != 0 +# define cds_bitop_msb64nz_DEFINED + static inline int msb64nz( atomic64u_t nArg ) + { + assert( nArg != 0 ); + long double d = nArg; + long nExp; + asm __volatile__("getf.exp %0=%1\n\t" : "=r" (nExp) : "f" (d)); + return (int) (nExp - 0xffff); + } +#endif // #if CDS_OS_TYPE != CDS_OS_HPUX + + }} // namespace gcc::ia64 + + using namespace gcc::ia64; + +}}} // namespace cds::bitop::platform +//@endcond + +#endif // #ifndef __CDS_COMPILER_GCC_IA64_BITOP_H diff --git a/cds/compiler/gcc/ia64/cxx11_atomic.h b/cds/compiler/gcc/ia64/cxx11_atomic.h new file mode 100644 index 00000000..cacfa9d1 --- /dev/null +++ b/cds/compiler/gcc/ia64/cxx11_atomic.h @@ -0,0 +1,653 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_IA64_CXX11_ATOMIC_H +#define __CDS_COMPILER_GCC_IA64_CXX11_ATOMIC_H + +/* + Source: + 1. load/store: http://www.decadent.org.uk/pipermail/cpp-threads/2008-December/001932.html + 2. 
Mapping to C++ Memory Model: http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html +*/ + +#include + +//@cond +namespace cds { namespace cxx11_atomics { + namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace ia64 { + + static inline void itanium_full_fence() CDS_NOEXCEPT + { + __asm__ __volatile__ ( "mf \n\t" ::: "memory" ); + } + + static inline void fence_before( memory_order order ) CDS_NOEXCEPT + { + switch(order) { + case memory_order_relaxed: + case memory_order_consume: + case memory_order_acquire: + break; + case memory_order_release: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_seq_cst: + itanium_full_fence(); + break; + } + } + + static inline void fence_after( memory_order order ) CDS_NOEXCEPT + { + switch(order) { + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_relaxed: + case memory_order_consume: + case memory_order_release: + break; + case memory_order_seq_cst: + itanium_full_fence(); + break; + } + } + + + //----------------------------------------------------------------------------- + // fences + //----------------------------------------------------------------------------- + static inline void thread_fence(memory_order order) CDS_NOEXCEPT + { + switch(order) + { + case memory_order_relaxed: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_seq_cst: + itanium_full_fence(); + break; + default:; + } + } + + static inline void signal_fence(memory_order order) CDS_NOEXCEPT + { + // C++11: 29.8.8: only compiler optimization, no hardware instructions + switch(order) + { + case memory_order_relaxed: + break; + case memory_order_consume: + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + default:; + } + } + +#define CDS_ITANIUM_ATOMIC_LOAD( n_bytes, n_bits ) \ + template \ + static inline T load##n_bits( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT \ + { \ + static_assert( sizeof(T) == n_bytes, "Illegal size of operand" ) ; \ + assert( order == memory_order_relaxed \ + || order == memory_order_consume \ + || order == memory_order_acquire \ + || order == memory_order_seq_cst \ + ) ; \ + assert( pSrc != NULL ) ; \ + T val ; \ + __asm__ __volatile__ ( \ + "ld" #n_bytes ".acq %[val] = [%[pSrc]] \n\t" \ + : [val] "=r" (val) \ + : [pSrc] "r" (pSrc) \ + : "memory" \ + ) ; \ + return val ; \ + } + +#define CDS_ITANIUM_ATOMIC_STORE( n_bytes, n_bits ) \ + template \ + static inline void store##n_bits( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT \ + { \ + static_assert( sizeof(T) == n_bytes, "Illegal size of operand" ) ; \ + assert( order == memory_order_relaxed \ + || order == memory_order_release \ + || order == memory_order_seq_cst \ + ) ; \ + assert( pDest != NULL ) ; \ + if ( order == memory_order_seq_cst ) { \ + __asm__ __volatile__ ( \ + "st" #n_bytes ".rel [%[pDest]] = %[val] \n\t" \ + "mf \n\t" \ + :: [pDest] "r" (pDest), [val] "r" (val) \ + : "memory" \ + ) ; \ + } \ + else { \ + __asm__ __volatile__ ( \ + "st" #n_bytes ".rel [%[pDest]] = %[val] \n\t" \ + :: [pDest] "r" (pDest), [val] "r" (val) \ + : "memory" \ + ) ; \ + fence_after(order) ; \ + } \ + } + +#define CDS_ITANIUM_ATOMIC_CAS( n_bytes, n_bits ) \ + template \ + static inline bool cas##n_bits##_strong( T 
volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order /*mo_fail*/ ) CDS_NOEXCEPT \ + { \ + static_assert( sizeof(T) == n_bytes, "Illegal size of operand" ) ; \ + T current ; \ + switch(mo_success) { \ + case memory_order_relaxed: \ + case memory_order_consume: \ + case memory_order_acquire: \ + __asm__ __volatile__ ( \ + "mov ar.ccv = %[expected] ;;\n\t" \ + "cmpxchg" #n_bytes ".acq %[current] = [%[pDest]], %[desired], ar.ccv\n\t" \ + : [current] "=r" (current) \ + : [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) \ + : "ar.ccv", "memory" \ + ); \ + break ; \ + case memory_order_release: \ + __asm__ __volatile__ ( \ + "mov ar.ccv = %[expected] ;;\n\t" \ + "cmpxchg" #n_bytes ".rel %[current] = [%[pDest]], %[desired], ar.ccv\n\t" \ + : [current] "=r" (current) \ + : [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) \ + : "ar.ccv", "memory" \ + ); \ + break ; \ + case memory_order_acq_rel: \ + case memory_order_seq_cst: \ + __asm__ __volatile__ ( \ + "mov ar.ccv = %[expected] ;;\n\t" \ + "cmpxchg" #n_bytes ".rel %[current] = [%[pDest]], %[desired], ar.ccv\n\t" \ + "mf \n\t" \ + : [current] "=r" (current) \ + : [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) \ + : "ar.ccv", "memory" \ + ); \ + break; \ + default: \ + assert(false); \ + } \ + bool bSuccess = expected == current ; \ + expected = current ; \ + return bSuccess ; \ + } \ + template \ + static inline bool cas##n_bits##_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT \ + { return cas##n_bits##_strong( pDest, expected, desired, mo_success, mo_fail ); } + + // xchg is performed with acquire semantics +#define CDS_ITANIUM_ATOMIC_EXCHANGE( n_bytes, n_bits ) \ + template \ + static inline T exchange##n_bits( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT \ + { \ + static_assert( sizeof(T) == n_bytes, "Illegal size of operand" ) ; \ + assert( pDest != NULL ) ; \ + T current ; \ + switch(order) \ + { \ + case memory_order_relaxed: \ + case memory_order_consume: \ + case memory_order_acquire: \ + __asm__ __volatile__ ( \ + "xchg" #n_bytes " %[current] = [%[pDest]], %[val]\n\t" \ + : [current] "=r" (current) \ + : [pDest] "r" (pDest), [val] "r" (val) \ + : "memory" \ + ); \ + break; \ + case memory_order_acq_rel: \ + case memory_order_release: \ + case memory_order_seq_cst: \ + __asm__ __volatile__ ( \ + "mf \n\t" \ + "xchg" #n_bytes " %[current] = [%[pDest]], %[val]\n\t" \ + : [current] "=r" (current) \ + : [pDest] "r" (pDest), [val] "r" (val) \ + : "memory" \ + ); \ + break; \ + default: assert(false); \ + } \ + return current ; \ + } + +#define CDS_ITANIUM_ATOMIC_FETCH_ADD( n_bytes, n_add ) \ + switch (order) { \ + case memory_order_relaxed: \ + case memory_order_consume: \ + case memory_order_acquire: \ + __asm__ __volatile__ ( \ + "fetchadd" #n_bytes ".acq %[cur] = [%[pDest]], " #n_add " \n\t" \ + : [cur] "=r" (cur) \ + : [pDest] "r" (pDest) \ + : "memory" \ + ); \ + break ; \ + case memory_order_release: \ + __asm__ __volatile__ ( \ + "fetchadd" #n_bytes ".rel %[cur] = [%[pDest]], " #n_add " \n\t" \ + : [cur] "=r" (cur) \ + : [pDest] "r" (pDest) \ + : "memory" \ + ); \ + break ; \ + case memory_order_acq_rel: \ + case memory_order_seq_cst: \ + __asm__ __volatile__ ( \ + "fetchadd" #n_bytes ".rel %[cur] = [%[pDest]], " #n_add " \n\t" \ + "mf \n\t" \ + : [cur] "=r" (cur) \ + : [pDest] "r" (pDest) \ + : "memory" \ + ); \ + break ; \ + default: \ + assert(false); \ + } + 
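The fetchadd-based macro above only covers the increments that the Itanium fetchadd instruction accepts as immediates (+/-1, 4, 8 and 16); any other addend has to fall back to a compare-and-swap retry loop, which is what the fetch32_add / fetch64_add primitives below do. As a minimal illustrative sketch (assuming only a C++11 <atomic> header; this is not how the library itself implements it -- the library uses the inline assembly above), the same fallback pattern can be written portably like this:

#include <atomic>

// Generic fetch-and-add emulated with a CAS retry loop.
// compare_exchange_weak reloads 'cur' with the currently stored value on
// failure, so the loop retries until the addition is applied atomically.
template <typename T>
T fetch_add_via_cas( std::atomic<T>& dest, T val, std::memory_order order )
{
    T cur = dest.load( std::memory_order_relaxed );
    while ( !dest.compare_exchange_weak( cur, cur + val, order, std::memory_order_relaxed ))
        ;   // 'cur' now holds the latest observed value; retry with it
    return cur; // value held immediately before the successful addition
}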
+ //----------------------------------------------------------------------------- + // 8bit primitives + //----------------------------------------------------------------------------- + + CDS_ITANIUM_ATOMIC_LOAD( 1, 8 ) + CDS_ITANIUM_ATOMIC_STORE( 1, 8 ) + CDS_ITANIUM_ATOMIC_CAS( 1, 8 ) + CDS_ITANIUM_ATOMIC_EXCHANGE( 1, 8 ) + + //----------------------------------------------------------------------------- + // 16bit primitives + //----------------------------------------------------------------------------- + + CDS_ITANIUM_ATOMIC_LOAD( 2, 16 ) + CDS_ITANIUM_ATOMIC_STORE( 2, 16 ) + CDS_ITANIUM_ATOMIC_CAS( 2, 16 ) + CDS_ITANIUM_ATOMIC_EXCHANGE( 2, 16 ) + + //----------------------------------------------------------------------------- + // 32bit primitives + //----------------------------------------------------------------------------- + + CDS_ITANIUM_ATOMIC_LOAD( 4, 32 ) + CDS_ITANIUM_ATOMIC_STORE( 4, 32 ) + CDS_ITANIUM_ATOMIC_CAS( 4, 32 ) + CDS_ITANIUM_ATOMIC_EXCHANGE( 4, 32 ) + +# define CDS_ATOMIC_fetch32_add_defined + template + static inline T fetch32_add( T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( pDest != NULL ); + + T cur; + switch ( val ) { + case 1: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, 1 ); + break; + case 4: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, 4 ); + break; + case 8: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, 8 ); + break; + case 16: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, 16 ); + break; + default: + cur = load32( pDest, memory_order_relaxed ); + do {} while ( !cas32_strong( pDest, cur, cur + val, order, memory_order_relaxed )); + break; + } + return cur; + } + +# define CDS_ATOMIC_fetch32_sub_defined + template + static inline T fetch32_sub( T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( pDest != NULL ); + T cur; + switch ( val ) { + case 1: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, -1 ); + break; + case 4: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, -4 ); + break; + case 8: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, -8 ); + break; + case 16: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, -16 ); + break; + default: + cur = load32( pDest, memory_order_relaxed ); + do {} while ( !cas32_strong( pDest, cur, cur - val, order, memory_order_relaxed )); + break; + } + return cur; + } + + //----------------------------------------------------------------------------- + // 64bit primitives + //----------------------------------------------------------------------------- + + CDS_ITANIUM_ATOMIC_LOAD( 8, 64 ) + CDS_ITANIUM_ATOMIC_STORE( 8, 64 ) + CDS_ITANIUM_ATOMIC_CAS( 8, 64 ) + CDS_ITANIUM_ATOMIC_EXCHANGE( 8, 64 ) + +# define CDS_ATOMIC_fetch64_add_defined + template + static inline T fetch64_add( T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( pDest != NULL ); + + T cur; + switch ( val ) { + case 1: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 1 ); + break; + case 4: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 4 ); + break; + case 8: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 8 ); + break; + case 16: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 16 ); + break; + default: + cur = load64( pDest, memory_order_relaxed ); + do {} while ( !cas64_strong( pDest, cur, cur + val, order, memory_order_relaxed )); + break; + } + return cur; + } + +# define CDS_ATOMIC_fetch64_sub_defined + template + static inline T fetch64_sub( T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 
8, "Illegal size of operand" ); + assert( pDest != NULL ); + T cur; + switch ( val ) { + case 1: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -1 ); + break; + case 4: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -4 ); + break; + case 8: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -8 ); + break; + case 16: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -16 ); + break; + default: + cur = load64( pDest, memory_order_relaxed ); + do {} while ( !cas64_strong( pDest, cur, cur - val, order, memory_order_relaxed )); + break; + } + return cur; + } + + //----------------------------------------------------------------------------- + // pointer primitives + //----------------------------------------------------------------------------- + template + static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + T * val; + __asm__ __volatile__ ( + "ld8.acq %[val] = [%[pSrc]] \n\t" + : [val] "=r" (val) + : [pSrc] "r" (pSrc) + : "memory" + ); + return val; + } + + template + static inline void store_ptr( T * volatile * pDest, T * val, memory_order order ) CDS_NOEXCEPT + { + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + + if ( order == memory_order_seq_cst ) { + __asm__ __volatile__ ( + "st8.rel [%[pDest]] = %[val] \n\t" + "mf \n\t" + :: [pDest] "r" (pDest), [val] "r" (val) + : "memory" + ); + } + else { + __asm__ __volatile__ ( + "st8.rel [%[pDest]] = %[val] \n\t" + :: [pDest] "r" (pDest), [val] "r" (val) + : "memory" + ); + fence_after(order); + } + } + + template + static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == 8, "Illegal size of operand" ); + assert( pDest != NULL ); + + T * current; + + switch(mo_success) { + case memory_order_relaxed: + case memory_order_consume: + case memory_order_acquire: + __asm__ __volatile__ ( + "mov ar.ccv = %[expected] ;;\n\t" + "cmpxchg8.acq %[current] = [%[pDest]], %[desired], ar.ccv\n\t" + : [current] "=r" (current) + : [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) + : "ar.ccv", "memory" + ); + break; + case memory_order_release: + __asm__ __volatile__ ( + "mov ar.ccv = %[expected] ;;\n\t" + "cmpxchg8.rel %[current] = [%[pDest]], %[desired], ar.ccv\n\t" + : [current] "=r" (current) + : [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) + : "ar.ccv", "memory" + ); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + __asm__ __volatile__ ( + "mov ar.ccv = %[expected] ;;\n\t" + "cmpxchg8.rel %[current] = [%[pDest]], %[desired], ar.ccv\n\t" + "mf \n\t" + : [current] "=r" (current) + : [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) + : "ar.ccv", "memory" + ); + break; + default: + assert(false); + } + + bool bSuccess = expected == current; + expected = current; + if ( !bSuccess ) + fence_after( mo_fail ); + return bSuccess; + } + + template + static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T * exchange_ptr( T * volatile * pDest, T * val, memory_order order ) CDS_NOEXCEPT + { + static_assert( 
sizeof(T *) == 8, "Illegal size of operand" ); + assert( pDest != NULL ); + + T * current; + switch(order) { + case memory_order_relaxed: + case memory_order_consume: + case memory_order_acquire: + __asm__ __volatile__ ( + "xchg8 %[current] = [%[pDest]], %[val]\n\t" + : [current] "=r" (current) + : [pDest] "r" (pDest), [val] "r" (val) + : "memory" + ); + break; + case memory_order_acq_rel: + case memory_order_release: + case memory_order_seq_cst: + __asm__ __volatile__ ( + "mf \n\t" + "xchg8 %[current] = [%[pDest]], %[val]\n\t" + : [current] "=r" (current) + : [pDest] "r" (pDest), [val] "r" (val) + : "memory" + ); + break; + default: assert(false); + } + return current; + } + + + template struct atomic_pointer_sizeof { enum { value = sizeof(T) }; }; + template <> struct atomic_pointer_sizeof { enum { value = 1 }; }; + + // It does not work properly + // atomic.fetch_add( ... ) returns NULL, why?.. +//# define CDS_ATOMIC_fetch_ptr_add_defined + template + static inline T * fetch_ptr_add( T * volatile * pDest, ptrdiff_t val, memory_order order) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == 8, "Illegal size of operand" ); + assert( pDest != NULL ); + + T * cur; + val *= atomic_pointer_sizeof::value; + switch ( val ) { + case 1: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 1 ); + break; + case 4: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 4 ); + break; + case 8: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 8 ); + break; + case 16: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 16 ); + break; + default: + cur = load_ptr( pDest, memory_order_relaxed ); + do {} while ( !cas_ptr_strong( pDest, cur, reinterpret_cast(reinterpret_cast(cur) + val), order, memory_order_relaxed )); + break; + } + return cur; + } + + // It does not work properly + // atomic.fetch_sub( ... ) returns NULL, why?.. +//# define CDS_ATOMIC_fetch_ptr_sub_defined + template + static inline T * fetch_ptr_sub( T * volatile * pDest, ptrdiff_t val, memory_order order) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == 8, "Illegal size of operand" ); + assert( pDest != NULL ); + T * cur; + val *= atomic_pointer_sizeof::value; + switch ( val ) { + case 1: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -1 ); + break; + case 4: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -4 ); + break; + case 8: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -8 ); + break; + case 16: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -16 ); + break; + default: + cur = load_ptr( pDest, memory_order_relaxed ); + do {} while ( !cas_ptr_strong( pDest, cur, reinterpret_cast(reinterpret_cast(cur) - val), order, memory_order_relaxed )); + break; + } + return cur; + } + + //----------------------------------------------------------------------------- + // atomic flag primitives + //----------------------------------------------------------------------------- + + typedef bool atomic_flag_type; + static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order order ) CDS_NOEXCEPT + { + return exchange8( pFlag, true, order ); + } + + static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) CDS_NOEXCEPT + { + store8( pFlag, false, order ); + } + +#undef CDS_ITANIUM_ATOMIC_LOAD +#undef CDS_ITANIUM_ATOMIC_STORE +#undef CDS_ITANIUM_ATOMIC_CAS +#undef CDS_ITANIUM_ATOMIC_EXCHANGE +#undef CDS_ITANIUM_ATOMIC_FETCH_ADD + + }} // namespace gcc::ia64 + +#ifndef CDS_CXX11_INLINE_NAMESPACE_SUPPORT + using namespace gcc::ia64; +#endif + } // namespace platform +}} // namespace cds::cxx11_atomics +//@endcond + +#endif // #ifndef __CDS_COMPILER_GCC_IA64_CXX11_ATOMIC_H diff --git 
a/cds/compiler/gcc/ppc64/backoff.h b/cds/compiler/gcc/ppc64/backoff.h new file mode 100644 index 00000000..d4d1252b --- /dev/null +++ b/cds/compiler/gcc/ppc64/backoff.h @@ -0,0 +1,19 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_PPC64_BACKOFF_H +#define __CDS_COMPILER_GCC_PPC64_BACKOFF_H + +//@cond none + +namespace cds { namespace backoff { + namespace gcc { namespace ppc64 { + + }} // namespace gcc::ppc64 + + namespace platform { + using namespace gcc::ppc64; + } +}} // namespace cds::backoff + +//@endcond +#endif // #ifndef __CDS_COMPILER_GCC_PPC64_BACKOFF_H diff --git a/cds/compiler/gcc/ppc64/bitop.h b/cds/compiler/gcc/ppc64/bitop.h new file mode 100644 index 00000000..c435d396 --- /dev/null +++ b/cds/compiler/gcc/ppc64/bitop.h @@ -0,0 +1,17 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_PPC64_BITOP_H +#define __CDS_COMPILER_GCC_PPC64_BITOP_H + +//@cond none +namespace cds { + namespace bitop { namespace platform { namespace gcc { namespace ppc64 { + + }} // namespace gcc::ppc64 + + using namespace gcc::ppc64; + +}}} // namespace cds::bitop::platform +//@endcond + +#endif // #ifndef __CDS_COMPILER_GCC_PPC64_BITOP_H diff --git a/cds/compiler/gcc/sparc/backoff.h b/cds/compiler/gcc/sparc/backoff.h new file mode 100644 index 00000000..e8f9b28c --- /dev/null +++ b/cds/compiler/gcc/sparc/backoff.h @@ -0,0 +1,30 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_SPARC_BACKOFF_H +#define __CDS_COMPILER_GCC_SPARC_BACKOFF_H + +//@cond none + +namespace cds { namespace backoff { + namespace gcc { namespace Sparc { + +# define CDS_backoff_pause_defined + static inline void backoff_pause( unsigned int nLoop = 0x000003FF ) + {} + +# define CDS_backoff_nop_defined + static inline void backoff_nop() + { + asm volatile ( "nop;" ); + } + + + }} // namespace gcc::Sparc + + namespace platform { + using namespace gcc::Sparc; + } +}} // namespace cds::backoff + +//@endcond +#endif // #ifndef __CDS_COMPILER_GCC_SPARC_BACKOFF_H diff --git a/cds/compiler/gcc/sparc/bitop.h b/cds/compiler/gcc/sparc/bitop.h new file mode 100644 index 00000000..632b4018 --- /dev/null +++ b/cds/compiler/gcc/sparc/bitop.h @@ -0,0 +1,42 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_SPARC_BITOP_H +#define __CDS_COMPILER_GCC_SPARC_BITOP_H + +//@cond none +namespace cds { + namespace bitop { namespace platform { namespace gcc { namespace Sparc { + + // MSB - return index (1..64) of most significant bit in nArg. If nArg == 0 return 0 + // Source: UltraSPARC Architecture 2007 + // + // Test result: this variant and its variation about 100 times slower then generic implementation :-( + static inline int sparc_msb64( atomic64u_t nArg ) + { + atomic64u_t result; + asm volatile ( + "neg %[nArg], %[result] \n\t" + "xnor %[nArg], %[result], %%g5 \n\t" + "popc %%g5, %[result] \n\t" + "movrz %[nArg], %%g0, %[result] \n\t" + : [result] "=r" (result) + : [nArg] "r" (nArg) + : "g5" + ); + return result; + } + + // MSB - return index (1..32) of most significant bit in nArg. 
If nArg == 0 return 0 + static inline int sparc_msb32( atomic32u_t nArg ) + { + return sparc_msb64( (atomic64u_t) nArg ); + } + + }} // namespace gcc::Sparc + + using namespace gcc::Sparc; + +}}} // namespace cds::bitop::platform +//@endcond + +#endif // #ifndef __CDS_COMPILER_GCC_SPARC_BITOP_H diff --git a/cds/compiler/gcc/sparc/cxx11_atomic.h b/cds/compiler/gcc/sparc/cxx11_atomic.h new file mode 100644 index 00000000..801bc796 --- /dev/null +++ b/cds/compiler/gcc/sparc/cxx11_atomic.h @@ -0,0 +1,610 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_SPARC_CXX11_ATOMIC_H +#define __CDS_COMPILER_GCC_SPARC_CXX11_ATOMIC_H + +#include + +/* + Source: + + 1. [Doug Lea "JSR-133 Cookbook for Compiler Writers]: + + Acquire semantics: load; LoadLoad+LoadStore + Release semantics: LoadStore+StoreStore; store + + 2. boost::atomic library by Helge Bahman + 3. OpenSparc source code +*/ + +#if CDS_OS_TYPE == CDS_OS_LINUX +# define CDS_SPARC_RMO_MEMORY_MODEL +#endif + +#define CDS_SPARC_MB_FULL "membar #Sync \n\t" +#ifdef CDS_SPARC_RMO_MEMORY_MODEL + // RMO memory model (Linux only?..) Untested +# define CDS_SPARC_MB_LL_LS "membar #LoadLoad|#LoadStore \n\t" +# define CDS_SPARC_MB_LS_SS "membar #LoadStore|#StoreStore \n\t" +# define CDS_SPARC_MB_LL_LS_SS "membar #LoadLoad|#LoadStore|#StoreStore \n\t" +#else + // TSO memory model (default; Solaris uses this model) +# define CDS_SPARC_MB_LL_LS +# define CDS_SPARC_MB_LS_SS +# define CDS_SPARC_MB_LL_LS_SS +#endif + +#define CDS_SPARC_MB_ACQ CDS_SPARC_MB_LL_LS +#define CDS_SPARC_MB_REL CDS_SPARC_MB_LS_SS +#define CDS_SPARC_MB_ACQ_REL CDS_SPARC_MB_LL_LS_SS +#define CDS_SPARC_MB_SEQ_CST CDS_SPARC_MB_FULL + +//@cond +namespace cds { namespace cxx11_atomics { + namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace Sparc { + + static inline void fence_before( memory_order order ) CDS_NOEXCEPT + { + switch(order) { + case memory_order_relaxed: + case memory_order_acquire: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acq_rel: + __asm__ __volatile__ ( "" CDS_SPARC_MB_REL ::: "memory" ); + break; + case memory_order_seq_cst: + __asm__ __volatile__ ( "" CDS_SPARC_MB_FULL ::: "memory" ); + break; + } + } + + static inline void fence_after( memory_order order ) CDS_NOEXCEPT + { + switch(order) { + case memory_order_relaxed: + case memory_order_consume: + case memory_order_release: + break; + case memory_order_acquire: + case memory_order_acq_rel: + __asm__ __volatile__ ( "" CDS_SPARC_MB_ACQ ::: "memory" ); + break; + case memory_order_seq_cst: + __asm__ __volatile__ ( "" CDS_SPARC_MB_FULL ::: "memory" ); + break; + } + } + + + //----------------------------------------------------------------------------- + // fences + //----------------------------------------------------------------------------- + static inline void thread_fence(memory_order order) CDS_NOEXCEPT + { + switch(order) + { + case memory_order_relaxed: + case memory_order_consume: + break; + case memory_order_acquire: + __asm__ __volatile__ ( "" CDS_SPARC_MB_ACQ ::: "memory" ); + break; + case memory_order_release: + __asm__ __volatile__ ( "" CDS_SPARC_MB_REL ::: "memory" ); + break; + case memory_order_acq_rel: + __asm__ __volatile__ ( "" CDS_SPARC_MB_ACQ_REL ::: "memory" ); + break; + case memory_order_seq_cst: + __asm__ __volatile__ ( "" CDS_SPARC_MB_SEQ_CST ::: "memory" ); + break; + default:; + } + } + + static inline void signal_fence(memory_order order) CDS_NOEXCEPT + { + // C++11: 29.8.8: only compiler 
optimization, no hardware instructions + switch(order) + { + case memory_order_relaxed: + break; + case memory_order_consume: + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + default:; + } + } + + //----------------------------------------------------------------------------- + // atomic flag primitives + //----------------------------------------------------------------------------- + + typedef unsigned char atomic_flag_type; + static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order order ) CDS_NOEXCEPT + { + atomic_flag_type fCur; + fence_before( order ); + __asm__ __volatile__( + "ldstub [%[pFlag]], %[fCur] \n\t" + : [fCur] "=r"(fCur) + : [pFlag] "r"(pFlag) + : "memory", "cc" + ); + fence_after( order ); + return fCur != 0; + } + + static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) CDS_NOEXCEPT + { + fence_before( order ); + __asm__ __volatile__( + CDS_SPARC_MB_REL + "stub %%g0, [%[pFlag]] \n\t" + :: [pFlag] "r"(pFlag) + : "memory" + ); + fence_after( order ); + } + + //----------------------------------------------------------------------------- + // 32bit primitives + //----------------------------------------------------------------------------- + + template + static inline void store32( T volatile * pDest, T src, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + + fence_before(order); + *pDest = src; + fence_after(order); + } + + template + static inline T load32( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + + fence_before(order); + T v = *pSrc; + fence_after(order); + return v; + } + + template + static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( pDest != NULL ); + + fence_before( mo_success ); + __asm__ __volatile__( + "cas [%[pDest]], %[expected], %[desired]" + : [desired] "+r" (desired) + : [pDest] "r" (pDest), [expected] "r" (expected) + : "memory" + ); + + // desired contains current value + + bool bSuccess = desired == expected; + if ( bSuccess ) + fence_after( mo_success ); + else { + fence_after(mo_fail); + expected = desired; + } + + return bSuccess; + } + + template + static inline bool cas32_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas32_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T exchange32( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( pDest != NULL ); + + // This primitive could be implemented via "swap" instruction but "swap" is deprecated in UltraSparc + + T cur = load32( pDest, memory_order_relaxed ); + do {} while ( !cas32_strong( pDest, cur, v, order, memory_order_relaxed )); + return cur; + } + + 
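The ldstub-based atomic_flag_tas and atomic_flag_clear primitives above are the building blocks of a classic test-and-set spin lock. A minimal usage sketch, written against std::atomic_flag rather than this platform layer (assuming a C++11 compiler; it is not the library's own lock implementation):

#include <atomic>

// Test-and-set spin lock: test_and_set plays the role of atomic_flag_tas,
// clear plays the role of atomic_flag_clear.
class spin_lock {
    std::atomic_flag m_flag;
public:
    spin_lock()
    {
        m_flag.clear( std::memory_order_relaxed );
    }
    void lock()
    {
        // acquire ordering on success keeps the critical section after the lock
        while ( m_flag.test_and_set( std::memory_order_acquire ))
            ;   // spin; a real implementation would insert a back-off here
    }
    void unlock()
    {
        // release ordering publishes the writes made inside the critical section
        m_flag.clear( std::memory_order_release );
    }
};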
//----------------------------------------------------------------------------- + // 64bit primitives + //----------------------------------------------------------------------------- + + template + static inline T load64( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + + fence_before(order); + T v = *pSrc; + fence_after(order); + return v; + } + + template + static inline void store64( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + + fence_before(order); + *pDest = val; + fence_after(order); + + } + + template + static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( pDest != NULL ); + + fence_before( mo_success ); + __asm__ __volatile__( + "casx [%[pDest]], %[expected], %[desired]" + : [desired] "+r" (desired) + : [pDest] "r" (pDest), [expected] "r" (expected) + : "memory" + ); + + // desired contains current value + + bool bSuccess = desired == expected; + if ( bSuccess ) { + fence_after( mo_success ); + } + else { + fence_after(mo_fail); + expected = desired; + } + + return bSuccess; + } + + template + static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas64_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T exchange64( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( pDest != NULL ); + + T cur = load64( pDest, memory_order_relaxed ); + do {} while ( !cas64_strong( pDest, cur, v, order, memory_order_relaxed )); + return cur; + } + + //----------------------------------------------------------------------------- + // 8bit primitives + //----------------------------------------------------------------------------- + + template + static inline void store8( T volatile * pDest, T src, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + + fence_before( order ); + *pDest = src; + fence_after( order ); + } + + template + static inline T load8( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + + fence_before( order ); + T v = *pSrc; + fence_after( order ); + return v; + } + + template + static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + assert( pDest != NULL ); + + union u32 { + uint32_t w; + T c[4]; + }; + static_assert( sizeof(u32) == sizeof(uint32_t), 
"Argument size error" ); + + u32 volatile * pDest32 = (u32 *)( uintptr_t( pDest ) & ~0x03 ); + size_t const nCharIdx = (size_t)( uintptr_t( pDest ) & 0x03 ); + u32 uExpected; + u32 uDesired; + + bool bSuccess; + for (;;) { + uExpected.w = + uDesired.w = pDest32->w; + uExpected.c[nCharIdx] = expected; + uDesired.c[nCharIdx] = desired; + + bSuccess = cas32_weak( reinterpret_cast(pDest32), uExpected.w, uDesired.w, mo_success, mo_fail ); + if ( bSuccess || uExpected.c[nCharIdx] != expected ) + break; + } + + expected = uExpected.c[nCharIdx]; + return bSuccess; + } + + template + static inline bool cas8_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + assert( pDest != NULL ); + + union u32 { + uint32_t w; + T c[4]; + }; + static_assert( sizeof(u32) == sizeof(uint32_t), "Argument size error" ); + + u32 volatile * pDest32 = (u32 *)( uintptr_t( pDest ) & ~0x03 ); + size_t const nCharIdx = (size_t)( uintptr_t( pDest ) & 0x03 ); + u32 uExpected; + u32 uDesired; + + uExpected.w = + uDesired.w = pDest32->w; + uExpected.c[nCharIdx] = expected; + uDesired.c[nCharIdx] = desired; + + bool bSuccess = cas32_weak( reinterpret_cast(pDest32), uExpected.w, uDesired.w, mo_success, mo_fail ); + + expected = uExpected.c[nCharIdx]; + return bSuccess; + } + + template + static inline T exchange8( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + assert( pDest != NULL ); + + T cur = load8( pDest, memory_order_relaxed ); + do {} while ( !cas8_strong( pDest, cur, v, order, memory_order_relaxed )); + return cur; + } + + //----------------------------------------------------------------------------- + // 16bit primitives + //----------------------------------------------------------------------------- + + template + static inline T load16( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + + fence_before( order ); + T v = *pSrc; + fence_after( order ); + return v; + } + + template + static inline void store16( T volatile * pDest, T src, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + + fence_before(order); + *pDest = src; + fence_after(order); + } + + template + static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( pDest != NULL ); + + union u32 { + uint32_t w; + T c[2]; + }; + static_assert( sizeof(u32) == sizeof(uint32_t), "Argument size error" ); + + u32 volatile * pDest32 = (u32 *)( uintptr_t( pDest ) & ~0x03 ); + size_t const nIdx = (size_t)( (uintptr_t( pDest ) >> 1) & 0x01 ); + u32 uExpected; + u32 uDesired; + + bool bSuccess; + for (;;) { + uExpected.w = + uDesired.w = pDest32->w; + uExpected.c[nIdx] = expected; + uDesired.c[nIdx] = desired; + + bSuccess = cas32_weak( reinterpret_cast(pDest32), uExpected.w, uDesired.w, mo_success, mo_fail ); + if ( bSuccess || uExpected.c[nIdx] != expected ) + break; + } + + 
expected = uExpected.c[nIdx]; + return bSuccess; + } + + template + static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( pDest != NULL ); + + union u32 { + uint32_t w; + T c[2]; + }; + static_assert( sizeof(u32) == sizeof(uint32_t), "Argument size error" ); + + u32 volatile * pDest32 = (u32 *)( uintptr_t( pDest ) & ~0x03 ); + size_t const nIdx = (size_t)( (uintptr_t( pDest ) >> 1) & 0x01 ); + u32 uExpected; + u32 uDesired; + + uExpected.w = + uDesired.w = pDest32->w; + uExpected.c[nIdx] = expected; + uDesired.c[nIdx] = desired; + + bool bSuccess = cas32_weak( reinterpret_cast(pDest32), uExpected.w, uDesired.w, mo_success, mo_fail ); + + expected = uExpected.c[nIdx]; + return bSuccess; + } + + template + static inline T exchange16( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( pDest != NULL ); + + T cur = load16( pDest, memory_order_relaxed ); + do {} while ( !cas16_strong( pDest, cur, v, order, memory_order_relaxed )); + return cur; + } + + //----------------------------------------------------------------------------- + // pointer primitives + //----------------------------------------------------------------------------- + + template + static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + + fence_before(order); + *pDest = src; + fence_after(order); + } + + template + static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + + fence_before( order ); + T * v = *pSrc; + fence_after( order ); + return v; + } + + template + static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + + return cas64_strong( (uint64_t volatile *) pDest, *reinterpret_cast( &expected ), (uint64_t) desired, mo_success, mo_fail ); + } + + template + static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + return (T *) exchange64( (uint64_t volatile *) pDest, (uint64_t) v, order ); + } + + }} // namespace gcc::Sparc + +#ifndef CDS_CXX11_INLINE_NAMESPACE_SUPPORT + using namespace gcc::Sparc; +#endif + } // namespace platform +}} // namespace cds::cxx11_atomics +//@endcond + +#undef CDS_SPARC_MB_ACQ +#undef CDS_SPARC_MB_REL +#undef CDS_SPARC_MB_SEQ_CST +#undef CDS_SPARC_MB_FULL +#undef CDS_SPARC_MB_LL_LS +#undef CDS_SPARC_MB_LS_SS +#undef CDS_SPARC_MB_LL_LS_SS + +#endif // #ifndef 
__CDS_COMPILER_GCC_AMD64_CXX11_ATOMIC_H diff --git a/cds/compiler/gcc/x86/backoff.h b/cds/compiler/gcc/x86/backoff.h new file mode 100644 index 00000000..074c9c33 --- /dev/null +++ b/cds/compiler/gcc/x86/backoff.h @@ -0,0 +1,39 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_X86_BACKOFF_H +#define __CDS_COMPILER_GCC_X86_BACKOFF_H + +//@cond none + +namespace cds { namespace backoff { + namespace gcc { namespace x86 { + +# define CDS_backoff_pause_defined + static inline void backoff_pause( unsigned int nLoop = 0x000003FF ) + { + asm volatile ( + "andl %[nLoop], %%ecx; \n\t" + "cmovzl %[nLoop], %%ecx; \n\t" + "rep; " + "nop; \n\t" + : /*no output*/ + : [nLoop] "r" (nLoop) + : "ecx", "cc" + ); + } + +# define CDS_backoff_nop_defined + static inline void backoff_nop() + { + asm volatile ( "nop;" ); + } + + }} // namespace gcc::x86 + + namespace platform { + using namespace gcc::x86; + } +}} // namespace cds::backoff + +//@endcond +#endif // #ifndef __CDS_COMPILER_GCC_X86_BACKOFF_H diff --git a/cds/compiler/gcc/x86/bitop.h b/cds/compiler/gcc/x86/bitop.h new file mode 100644 index 00000000..3982fb94 --- /dev/null +++ b/cds/compiler/gcc/x86/bitop.h @@ -0,0 +1,86 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_X86_BITOP_H +#define __CDS_COMPILER_GCC_X86_BITOP_H + +//@cond none +namespace cds { + namespace bitop { namespace platform { namespace gcc { namespace x86 { + // MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0 +# define cds_bitop_msb32_DEFINED + static inline int msb32( atomic32u_t nArg ) + { + int nRet; + __asm__ __volatile__ ( + "bsrl %[nArg], %[nRet] ;\n\t" + "jnz 1f ;\n\t" + "xorl %[nRet], %[nRet] ;\n\t" + "subl $1, %[nRet] ;\n\t" + "1:" + "addl $1, %[nRet] ;\n\t" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return nRet; + } + +# define cds_bitop_msb32nz_DEFINED + static inline int msb32nz( atomic32u_t nArg ) + { + assert( nArg != 0 ); + int nRet; + __asm__ __volatile__ ( + "bsrl %[nArg], %[nRet] ;" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return nRet; + } + + // LSB - return index (0..31) of least significant bit in nArg. If nArg == 0 return -1U +# define cds_bitop_lsb32_DEFINED + static inline int lsb32( atomic32u_t nArg ) + { + + int nRet; + __asm__ __volatile__ ( + "bsfl %[nArg], %[nRet] ;" + "jnz 1f ;" + "xorl %[nRet], %[nRet] ;" + "subl $1, %[nRet] ;" + "1:" + "addl $1, %[nRet] ;" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return nRet; + + } + + // LSB - return index (0..31) of least significant bit in nArg. 
+ // Condition: nArg != 0 +# define cds_bitop_lsb32nz_DEFINED + static inline int lsb32nz( atomic32u_t nArg ) + { + assert( nArg != 0 ); + int nRet; + __asm__ __volatile__ ( + "bsfl %[nArg], %[nRet] ;" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return nRet; + } + + }} // namespace gcc::x86 + + using namespace gcc::x86; + +}}} // namespace cds::bitop::platform +//@endcond + +#endif // #ifndef __CDS_ARH_X86_GCC_BITOP_H diff --git a/cds/compiler/gcc/x86/cxx11_atomic.h b/cds/compiler/gcc/x86/cxx11_atomic.h new file mode 100644 index 00000000..152ccc7c --- /dev/null +++ b/cds/compiler/gcc/x86/cxx11_atomic.h @@ -0,0 +1,184 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_X86_CXX11_ATOMIC_H +#define __CDS_COMPILER_GCC_X86_CXX11_ATOMIC_H + +#include +#include + +//@cond +namespace cds { namespace cxx11_atomics { + namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace x86 { + + //----------------------------------------------------------------------------- + // 64bit primitives + //----------------------------------------------------------------------------- + + template + static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 8 )); + + uint32_t ebxStore; + T prev = expected; + + fence_before(mo_success); + + // We must save EBX in PIC mode + __asm__ __volatile__ ( + "movl %%ebx, %[ebxStore]\n" + "movl %[desiredLo], %%ebx\n" + "lock; cmpxchg8b 0(%[pDest])\n" + "movl %[ebxStore], %%ebx\n" + : [prev] "=A" (prev), [ebxStore] "=m" (ebxStore) + : [desiredLo] "D" ((int)desired), [desiredHi] "c" ((int)(desired >> 32)), [pDest] "S" (pDest), "0" (prev) + : "memory"); + bool success = (prev == expected); + if (success) + fence_after(mo_success); + else { + fence_after(mo_fail); + expected = prev; + } + return success; + } + + template + static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas64_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T load64( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + assert( cds::details::is_aligned( pSrc, 8 )); + + T CDS_DATA_ALIGNMENT(8) v; + __asm__ __volatile__( + "movq (%[pSrc]), %[v] ; \n\t" + : [v] "=x" (v) + : [pSrc] "r" (pSrc) + : + ); + return v; + } + + + template + static inline T exchange64( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 8 )); + + T cur = load64( pDest, memory_order_relaxed ); + do { + } while (!cas64_weak( pDest, cur, v, order, memory_order_relaxed )); + return cur; + } + + template + static inline void store64( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + assert( cds::details::is_aligned( pDest, 8 )); + + if ( order != memory_order_seq_cst ) { + fence_before( order 
); + // Atomically stores 64bit value by SSE instruction + __asm__ __volatile__( + "movq %[val], (%[pDest]) ; \n\t" + : + : [val] "x" (val), [pDest] "r" (pDest) + : "memory" + ); + } + else { + exchange64( pDest, val, order ); + } + } + + + //----------------------------------------------------------------------------- + // pointer primitives + //----------------------------------------------------------------------------- + + template + static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + + return (T *) exchange32( (uint32_t volatile *) pDest, (uint32_t) v, order ); + } + + template + static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange_ptr( pDest, src, order ); + } + } + + template + static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + + T * v = *pSrc; + fence_after_load( order ); + return v; + } + + template + static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + + return cas32_strong( (uint32_t volatile *) pDest, *reinterpret_cast( &expected ), (uint32_t) desired, mo_success, mo_fail ); + } + + template + static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail ); + } + }} // namespace gcc::x86 + +#ifndef CDS_CXX11_INLINE_NAMESPACE_SUPPORT + using namespace gcc::x86; +#endif + } // namespace platform +}} // namespace cds::cxx11_atomics +//@endcond + +#endif // #ifndef __CDS_COMPILER_GCC_X86_CXX11_ATOMIC_H diff --git a/cds/compiler/gcc/x86/cxx11_atomic32.h b/cds/compiler/gcc/x86/cxx11_atomic32.h new file mode 100644 index 00000000..c945a988 --- /dev/null +++ b/cds/compiler/gcc/x86/cxx11_atomic32.h @@ -0,0 +1,474 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_GCC_X86_CXX11_ATOMIC32_H +#define __CDS_COMPILER_GCC_X86_CXX11_ATOMIC32_H + +#include +#include + +//@cond +namespace cds { namespace cxx11_atomics { + namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace x86 { + + static inline void fence_before( memory_order order ) CDS_NOEXCEPT + { + switch(order) { + case memory_order_relaxed: + case memory_order_acquire: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + } + } + + static inline void fence_after( memory_order order ) CDS_NOEXCEPT + { + switch(order) { + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_relaxed: + case 
memory_order_consume: + case memory_order_release: + break; + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + } + } + + + static inline void fence_after_load(memory_order order) CDS_NOEXCEPT + { + switch(order) { + case memory_order_relaxed: + case memory_order_release: + break; + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_consume: + break; + case memory_order_seq_cst: + __asm__ __volatile__ ( "mfence" ::: "memory" ); + break; + default:; + } + } + + //----------------------------------------------------------------------------- + // fences + //----------------------------------------------------------------------------- + static inline void thread_fence(memory_order order) CDS_NOEXCEPT + { + switch(order) + { + case memory_order_relaxed: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_seq_cst: + __asm__ __volatile__ ( "mfence" ::: "memory" ); + break; + default:; + } + } + + static inline void signal_fence(memory_order order) CDS_NOEXCEPT + { + // C++11: 29.8.8: only compiler optimization, no hardware instructions + switch(order) + { + case memory_order_relaxed: + break; + case memory_order_consume: + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + default:; + } + } + + //----------------------------------------------------------------------------- + // 8bit primitives + //----------------------------------------------------------------------------- + + template + static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + + T prev = expected; + fence_before(mo_success); + __asm__ __volatile__ ( + "lock ; cmpxchgb %[desired], %[pDest]" + : [prev] "+a" (prev), [pDest] "+m" (*pDest) + : [desired] "q" (desired) + ); + bool success = (prev == expected); + expected = prev; + if (success) + fence_after(mo_success); + else + fence_after(mo_fail); + return success; + } + + template + static inline bool cas8_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas8_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T exchange8( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + + fence_before(order); + __asm__ __volatile__ ( + "xchgb %[v], %[pDest]" + : [v] "+q" (v), [pDest] "+m" (*pDest) + ); + fence_after(order); + return v; + } + + template + static inline void store8( T volatile * pDest, T src, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange8( pDest, src, order ); + } + } + + template + static inline T load8( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == 
memory_order_seq_cst + ); + assert( pSrc != NULL ); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + +# define CDS_ATOMIC_fetch8_add_defined + template + static inline T fetch8_add( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT + { + fence_before(order); + __asm__ __volatile__ ( + "lock ; xaddb %[val], %[pDest]" + : [val] "+q" (val), [pDest] "+m" (*pDest) + ); + fence_after(order); + return val; + } + +# define CDS_ATOMIC_fetch8_sub_defined + template + static inline T fetch8_sub( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT + { + fence_before(order); + __asm__ __volatile__ ( + "negb %[val] ; \n" + "lock ; xaddb %[val], %[pDest]" + : [val] "+q" (val), [pDest] "+m" (*pDest) + ); + fence_after(order); + return val; + } + + //----------------------------------------------------------------------------- + // atomic flag primitives + //----------------------------------------------------------------------------- + + typedef bool atomic_flag_type; + static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order order ) CDS_NOEXCEPT + { + return exchange8( pFlag, true, order ); + } + + static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) CDS_NOEXCEPT + { + store8( pFlag, false, order ); + } + + //----------------------------------------------------------------------------- + // 16bit primitives + //----------------------------------------------------------------------------- + + template + static inline T exchange16( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 2 )); + + fence_before(order); + __asm__ __volatile__ ( + "xchgw %[v], %[pDest]" + : [v] "+q" (v), [pDest] "+m" (*pDest) + ); + fence_after(order); + return v; + } + + template + static inline void store16( T volatile * pDest, T src, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + assert( cds::details::is_aligned( pDest, 2 )); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange16( pDest, src, order ); + } + } + + template + static inline T load16( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + assert( cds::details::is_aligned( pSrc, 2 )); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + + template + static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 2 )); + + T prev = expected; + fence_before(mo_success); + __asm__ __volatile__ ( + "lock ; cmpxchgw %[desired], %[pDest]" + : [prev] "+a" (prev), [pDest] "+m" (*pDest) + : [desired] "q" (desired) + ); + bool success = prev == expected; + if (success) + fence_after(mo_success); + else { + fence_after(mo_fail); + expected = prev; + } + + return success; + } + + template + static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order 
mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas16_strong( pDest, expected, desired, mo_success, mo_fail ); + } + +# define CDS_ATOMIC_fetch16_add_defined + template + static inline T fetch16_add( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 2 )); + + fence_before(order); + __asm__ __volatile__ ( + "lock ; xaddw %[val], %[pDest]" + : [val] "+q" (val), [pDest] "+m" (*pDest) + ); + fence_after(order); + return val; + } + +# define CDS_ATOMIC_fetch16_sub_defined + template + static inline T fetch16_sub( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 2 )); + + fence_before(order); + __asm__ __volatile__ ( + "negw %[val] ; \n" + "lock ; xaddw %[val], %[pDest]" + : [val] "+q" (val), [pDest] "+m" (*pDest) + ); + fence_after(order); + return val; + } + + //----------------------------------------------------------------------------- + // 32bit primitives + //----------------------------------------------------------------------------- + + template + static inline T exchange32( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 4 )); + + fence_before(order); + __asm__ __volatile__ ( + "xchgl %[v], %[pDest]" + : [v] "+r" (v), [pDest] "+m" (*pDest) + ); + fence_after(order); + return v; + } + + template + static inline void store32( T volatile * pDest, T src, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + assert( cds::details::is_aligned( pDest, 4 )); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange32( pDest, src, order ); + } + } + + template + static inline T load32( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + assert( cds::details::is_aligned( pSrc, 4 )); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + + template + static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 4 )); + + T prev = expected; + fence_before(mo_success); + __asm__ __volatile__ ( + "lock ; cmpxchgl %[desired], %[pDest]" + : [prev] "+a" (prev), [pDest] "+m" (*pDest) + : [desired] "r" (desired) + ); + bool success = prev == expected; + if (success) + fence_after(mo_success); + else { + fence_after(mo_fail); + expected = prev; + } + return success; + } + + template + static inline bool cas32_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas32_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + // fetch_xxx may be emulated via cas32 + // If the platform has special fetch_xxx instruction + // then it should define CDS_ATOMIC_fetch32_xxx_defined macro + +# define 
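// -----------------------------------------------------------------------------
// Illustrative aside, not part of the original patch: emulating an atomic
// fetch-or on top of cas32_strong(), as the comment above suggests for
// operations that have no dedicated instruction in this back-end.  The name
// fetch32_or_emulated is hypothetical; load32() and cas32_strong() are the
// primitives defined above.
template <typename T>
static inline T fetch32_or_emulated( T volatile * pDest, T mask, memory_order order ) CDS_NOEXCEPT
{
    T cur = load32( pDest, memory_order_relaxed );
    // cas32_strong() writes the value it observed back into 'cur' on failure,
    // so each retry works with fresh data until the OR is applied atomically
    while ( !cas32_strong( pDest, cur, static_cast<T>( cur | mask ), order, memory_order_relaxed ))
        ;
    return cur;     // value held immediately before the update
}
// -----------------------------------------------------------------------------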
CDS_ATOMIC_fetch32_add_defined + template + static inline T fetch32_add( T volatile * pDest, T v, memory_order order) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 4 )); + + fence_before(order); + __asm__ __volatile__ ( + "lock ; xaddl %[v], %[pDest]" + : [v] "+r" (v), [pDest] "+m" (*pDest) + ); + fence_after(order); + return v; + } + +# define CDS_ATOMIC_fetch32_sub_defined + template + static inline T fetch32_sub( T volatile * pDest, T v, memory_order order) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 4 )); + + fence_before(order); + __asm__ __volatile__ ( + "negl %[v] ; \n" + "lock ; xaddl %[v], %[pDest]" + : [v] "+r" (v), [pDest] "+m" (*pDest) + ); + fence_after(order); + return v; + } + + }}} // namespace platform::gcc::x86 +}} // namespace cds::cxx11_atomics +//@endcond + +#endif // #ifndef __CDS_COMPILER_GCC_X86_CXX11_ATOMIC32_H diff --git a/cds/compiler/icl/compiler_barriers.h b/cds/compiler/icl/compiler_barriers.h new file mode 100644 index 00000000..d06e0143 --- /dev/null +++ b/cds/compiler/icl/compiler_barriers.h @@ -0,0 +1,30 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_ICL_COMPILER_BARRIERS_H +#define __CDS_COMPILER_ICL_COMPILER_BARRIERS_H + +#if _MSC_VER > 0 +# if _MSC_VER < 1700 + // VC++ up to vc10 +# include + +# pragma intrinsic(_ReadWriteBarrier) +# pragma intrinsic(_ReadBarrier) +# pragma intrinsic(_WriteBarrier) + +# define CDS_COMPILER_RW_BARRIER _ReadWriteBarrier() +# define CDS_COMPILER_R_BARRIER _ReadBarrier() +# define CDS_COMPILER_W_BARRIER _WriteBarrier() + +# else + // MS VC11+ +# include + +# define CDS_COMPILER_RW_BARRIER std::atomic_thread_fence( std::memory_order_acq_rel ) +# define CDS_COMPILER_R_BARRIER CDS_COMPILER_RW_BARRIER +# define CDS_COMPILER_W_BARRIER CDS_COMPILER_RW_BARRIER + +# endif +#endif + +#endif // #ifndef __CDS_COMPILER_ICL_COMPILER_BARRIERS_H diff --git a/cds/compiler/icl/cxx11_atomic_patches_win.h b/cds/compiler/icl/cxx11_atomic_patches_win.h new file mode 100644 index 00000000..b5719d7e --- /dev/null +++ b/cds/compiler/icl/cxx11_atomic_patches_win.h @@ -0,0 +1,27 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_ICL_CXX11_ATOMIC_PATCHES_WIN_H +#define __CDS_COMPILER_ICL_CXX11_ATOMIC_PATCHES_WIN_H + +//@cond +#if CDS_COMPILER == CDS_COMPILER_INTEL && CDS_CXX11_ATOMIC_SUPPORT == 1 && _MSC_VER < 1700 + +namespace std { + typedef std::atomic atomic_size_t; + + // Fences + static inline void atomic_thread_fence(memory_order order) CDS_NOEXCEPT + { + CDS_COMPILER_RW_BARRIER; + } + static inline void atomic_signal_fence(memory_order order) CDS_NOEXCEPT + { + CDS_COMPILER_RW_BARRIER; + } + +} + +#endif +//@endcond + +#endif // #ifndef __CDS_COMPILER_ICL_CXX11_ATOMIC_PATCHES_WIN_H diff --git a/cds/compiler/icl/defs.h b/cds/compiler/icl/defs.h new file mode 100644 index 00000000..96391fd4 --- /dev/null +++ b/cds/compiler/icl/defs.h @@ -0,0 +1,207 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_ICL_DEFS_H +#define __CDS_COMPILER_ICL_DEFS_H +//@cond + +// Compiler version +#ifdef __ICL +# define CDS_COMPILER_VERSION __ICL +#else +# define CDS_COMPILER_VERSION __INTEL_COMPILER +#endif + +// Compiler name +// Supported compilers: MS VC 2008, 2010, 2012 +// +# define CDS_COMPILER__NAME "Intel C++" +# define CDS_COMPILER__NICK "icl" + +// OS name +#if defined(_WIN64) +# define CDS_OS_INTERFACE CDS_OSI_WINDOWS +# define CDS_OS_TYPE CDS_OS_WIN64 +# define CDS_OS__NAME "Win64" +# define CDS_OS__NICK 
"Win64" +#elif defined(_WIN32) +# define CDS_OS_INTERFACE CDS_OSI_WINDOWS +# define CDS_OS_TYPE CDS_OS_WIN32 +# define CDS_OS__NAME "Win32" +# define CDS_OS__NICK "Win32" +#elif defined( __linux__ ) +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_LINUX +# define CDS_OS__NAME "linux" +# define CDS_OS__NICK "linux" +#endif + +// Processor architecture +#if defined(_M_X64) || defined(_M_AMD64) +# define CDS_BUILD_BITS 64 +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_AMD64 +# define CDS_PROCESSOR__NAME "AMD64" +# define CDS_PROCESSOR__NICK "amd64" +#elif defined(_M_IX86) +# define CDS_BUILD_BITS 32 +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_X86 +# define CDS_PROCESSOR__NAME "Intel x86" +# define CDS_PROCESSOR__NICK "x86" +#else +# define CDS_BUILD_BITS -1 +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_UNKNOWN +# define CDS_PROCESSOR__NAME "<>" +# error Intel C++ compiler is supported for x86 only +#endif + +#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS +# define __attribute__( _x ) +# define CDS_STDCALL __stdcall +#else +# if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 +# define CDS_STDCALL __attribute__((stdcall)) +# else +# define CDS_STDCALL +# endif +#endif + +#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS +# ifdef CDS_BUILD_LIB +# define CDS_EXPORT_API __declspec(dllexport) +# else +# define CDS_EXPORT_API __declspec(dllimport) +# endif +#endif + +#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS +# define alignof __alignof +#else +# define alignof __alignof__ +#endif + +//#if CDS_COMPILER_VERSION < 1600 +//# include +//# define static_assert(_expr, _msg) BOOST_STATIC_ASSERT((_expr)) +//#endif + +// Memory leaks detection (debug build only) +//#ifdef _DEBUG +//# define _CRTDBG_MAP_ALLOC +//# define _CRTDBG_MAPALLOC +//# include +//# include +//# define CDS_MSVC_MEMORY_LEAKS_DETECTING_ENABLED +//#endif + +#if CDS_COMPILER_VERSION < 1400 +# define CDS_CONSTEXPR +# define CDS_CONSTEXPR_CONST const +#else +# define CDS_CONSTEXPR constexpr +# define CDS_CONSTEXPR_CONST constexpr const +#endif + +// noexcept is not yet supported +#if CDS_COMPILER_VERSION < 1400 +# define CDS_NOEXCEPT_SUPPORT +# define CDS_NOEXCEPT_SUPPORT_(expr) +#else +# define CDS_NOEXCEPT_SUPPORT noexcept +# define CDS_NOEXCEPT_SUPPORT_(expr) noexcept(expr) +#endif + +// C++11 atomic support +#if CDS_COMPILER_VERSION >= 1300 && CDS_OS_INTERFACE == CDS_OSI_WINDOWS && _MSC_VER >= 1700 + // Intel C++ 13 supports C++11 atomic standard for VC++ 2012 +# define CDS_CXX11_ATOMIC_SUPPORT 1 +#endif + + +// Lambda (ICL 12 +) +#if CDS_COMPILER_VERSION >= 1200 +# define CDS_CXX11_LAMBDA_SUPPORT +#endif + +// RValue (ICL 10+) +#if CDS_COMPILER_VERSION >= 1200 +# define CDS_RVALUE_SUPPORT +# define CDS_MOVE_SEMANTICS_SUPPORT +#endif + +// Default template arguments for function templates (ICL 12.1+) +#if CDS_COMPILER_VERSION >= 1201 +# define CDS_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS_SUPPORT +#endif + +// C++11 delete definition ( function declaration = delete) +#if CDS_COMPILER_VERSION >= 1200 +# define CDS_CXX11_DELETE_DEFINITION_SUPPORT +#endif + +// C++11 explicitly-defaulted function (= default) [std 8.4.2 [dcl.fct.def.default]] +#if CDS_COMPILER_VERSION >= 1200 +# define CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT +#endif + +// Variadic template support (ICL 12.1+) +#if CDS_COMPILER_VERSION >= 1201 +# define CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT 1 +#endif + +// C++11 template alias +#if CDS_COMPILER_VERSION >= 1201 +# define CDS_CXX11_TEMPLATE_ALIAS_SUPPORT +#endif + +// C++11 inline namespace +#if CDS_COMPILER_VERSION >= 1400 +# define 
CDS_CXX11_INLINE_NAMESPACE_SUPPORT +#endif + +// Explicit conversion operator +//#if CDS_COMPILER_VERSION >= 1800 +//# define CDS_CXX11_EXPLICIT_CONVERSION_OPERATOR_SUPPORT +//#endif + + +// +// +#if _MSC_VER == 1500 +// MS VC 2008 has no +# include +#else +# include +#endif + +// Thread support library (thread, mutex, condition variable) +#if _MSC_VER >= 1700 + // MS VC 11+ +# define CDS_CXX11_STDLIB_THREAD +# define CDS_CXX11_STDLIB_MUTEX +# define CDS_CXX11_STDLIB_CONDITION_VARIABLE +# define CDS_CXX11_STDLIB_CHRONO +#endif + +#if defined( CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT ) && defined(CDS_MOVE_SEMANTICS_SUPPORT) && CDS_COMPILER_VERSION < 1400 + // Intel C++ bug: move ctor & assignment operator cannot be defaulted + // http://software.intel.com/en-us/forums/topic/394395 +# define CDS_DISABLE_DEFAULT_MOVE_CTOR +#endif + + +// ************************************************* +// Alignment macro + +#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS +# define CDS_TYPE_ALIGNMENT(n) __declspec( align(n) ) +# define CDS_DATA_ALIGNMENT(n) __declspec( align(n) ) +# define CDS_CLASS_ALIGNMENT(n) __declspec( align(n) ) +#else +# define CDS_TYPE_ALIGNMENT(n) __attribute__ ((aligned (n))) +# define CDS_CLASS_ALIGNMENT(n) __attribute__ ((aligned (n))) +# define CDS_DATA_ALIGNMENT(n) __attribute__ ((aligned (n))) +#endif + +#include + +//@endcond +#endif // #ifndef __CDS_COMPILER_VC_DEFS_H diff --git a/cds/compiler/vc/amd64/backoff.h b/cds/compiler/vc/amd64/backoff.h new file mode 100644 index 00000000..fa569a77 --- /dev/null +++ b/cds/compiler/vc/amd64/backoff.h @@ -0,0 +1,32 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_VC_AMD64_BACKOFF_H +#define __CDS_COMPILER_VC_AMD64_BACKOFF_H + +//@cond none + +namespace cds { namespace backoff { + namespace vc { namespace amd64 { + +# define CDS_backoff_pause_defined + static inline void backoff_pause( unsigned int nLoop = 0x000003FF ) + { + for ( unsigned int i = 0; i < nLoop; i++ ) + __nop(); + } + +# define CDS_backoff_nop_defined + static inline void backoff_nop() + { + __nop(); + } + + }} // namespace vc::amd64 + + namespace platform { + using namespace vc::amd64; + } +}} // namespace cds::backoff + +//@endcond +#endif // #ifndef __CDS_COMPILER_VC_AMD64_BACKOFF_H diff --git a/cds/compiler/vc/amd64/bitop.h b/cds/compiler/vc/amd64/bitop.h new file mode 100644 index 00000000..ee143d6f --- /dev/null +++ b/cds/compiler/vc/amd64/bitop.h @@ -0,0 +1,126 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_VC_AMD64_BITOP_H +#define __CDS_COMPILER_VC_AMD64_BITOP_H + +#if _MSC_VER == 1500 + /* + VC 2008 bug: + math.h(136) : warning C4985: 'ceil': attributes not present on previous declaration. + intrin.h(142) : see declaration of 'ceil' + + See http://connect.microsoft.com/VisualStudio/feedback/details/381422/warning-of-attributes-not-present-on-previous-declaration-on-ceil-using-both-math-h-and-intrin-h + */ +# pragma warning(push) +# pragma warning(disable: 4985) +# include +# pragma warning(pop) +#else +# include +#endif + +#pragma intrinsic(_BitScanReverse) +#pragma intrinsic(_BitScanForward) +#pragma intrinsic(_BitScanReverse64) +#pragma intrinsic(_BitScanForward64) + +//@cond none +namespace cds { + namespace bitop { namespace platform { namespace vc { namespace amd64 { + + // MSB - return index (1..32) of most significant bit in nArg. 
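// -----------------------------------------------------------------------------
// Illustrative aside, not part of the original patch: the intended use of the
// backoff_pause() primitive from the backoff header above - a bounded burst of
// nop instructions between retries of an optimistic operation.  The
// example_spin_until() helper and its Predicate parameter are hypothetical.
template <typename Predicate>
static inline void example_spin_until( Predicate ready )
{
    // poll the condition, burning a short run of nops between polls so the
    // waiting hardware thread does not starve its hyper-threaded sibling
    while ( !ready() )
        cds::backoff::platform::backoff_pause( 16 );
}
// -----------------------------------------------------------------------------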
If nArg == 0 return 0 +# define cds_bitop_msb32_DEFINED + static inline int msb32( atomic32u_t nArg ) + { + unsigned long nIndex; + if ( _BitScanReverse( &nIndex, nArg )) + return (int) nIndex + 1; + return 0; + } + +# define cds_bitop_msb32nz_DEFINED + static inline int msb32nz( atomic32u_t nArg ) + { + assert( nArg != 0 ); + unsigned long nIndex; + _BitScanReverse( &nIndex, nArg ); + return (int) nIndex; + } + + // LSB - return index (1..32) of least significant bit in nArg. If nArg == 0 return -1U +# define cds_bitop_lsb32_DEFINED + static inline int lsb32( atomic32u_t nArg ) + { + unsigned long nIndex; + if ( _BitScanForward( &nIndex, nArg )) + return (int) nIndex + 1; + return 0; + } + +# define cds_bitop_lsb32nz_DEFINED + static inline int lsb32nz( atomic32u_t nArg ) + { + assert( nArg != 0 ); + unsigned long nIndex; + _BitScanForward( &nIndex, nArg ); + return (int) nIndex; + } + + +# define cds_bitop_msb64_DEFINED + static inline int msb64( atomic64u_unaligned nArg ) + { + unsigned long nIndex; + if ( _BitScanReverse64( &nIndex, nArg )) + return (int) nIndex + 1; + return 0; + } + +# define cds_bitop_msb64nz_DEFINED + static inline int msb64nz( atomic64u_unaligned nArg ) + { + assert( nArg != 0 ); + unsigned long nIndex; + _BitScanReverse64( &nIndex, nArg ); + return (int) nIndex; + } + +# define cds_bitop_lsb64_DEFINED + static inline int lsb64( atomic64u_unaligned nArg ) + { + unsigned long nIndex; + if ( _BitScanForward64( &nIndex, nArg )) + return (int) nIndex + 1; + return 0; + } + +# define cds_bitop_lsb64nz_DEFINED + static inline int lsb64nz( atomic64u_unaligned nArg ) + { + assert( nArg != 0 ); + unsigned long nIndex; + _BitScanForward64( &nIndex, nArg ); + return (int) nIndex; + } + +# define cds_bitop_complement32_DEFINED + static inline bool complement32( atomic32u_t * pArg, unsigned int nBit ) + { + return _bittestandcomplement( reinterpret_cast( pArg ), nBit ) != 0; + } + +# define cds_bitop_complement64_DEFINED + static inline bool complement64( atomic64u_t * pArg, unsigned int nBit ) + { + return _bittestandcomplement64( reinterpret_cast<__int64 *>( pArg ), nBit ) != 0; + } + + + }} // namespace vc::amd64 + + using namespace vc::amd64; + +}}} // namespace cds::bitop::platform +//@endcond + +#endif // #ifndef __CDS_COMPILER_VC_AMD64_BITOP_H diff --git a/cds/compiler/vc/amd64/cxx11_atomic.h b/cds/compiler/vc/amd64/cxx11_atomic.h new file mode 100644 index 00000000..49b2f2c3 --- /dev/null +++ b/cds/compiler/vc/amd64/cxx11_atomic.h @@ -0,0 +1,584 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_VC_AMD64_CXX11_ATOMIC_H +#define __CDS_COMPILER_VC_AMD64_CXX11_ATOMIC_H + +#include +#include // for 128bit atomic load/store +#include + +#pragma intrinsic( _InterlockedIncrement ) +#pragma intrinsic( _InterlockedDecrement ) +#pragma intrinsic( _InterlockedCompareExchange ) +#pragma intrinsic( _InterlockedCompareExchangePointer ) +#pragma intrinsic( _InterlockedCompareExchange16 ) +#pragma intrinsic( _InterlockedCompareExchange64 ) +#pragma intrinsic( _InterlockedExchange ) +#pragma intrinsic( _InterlockedExchange64 ) +#pragma intrinsic( _InterlockedExchangePointer ) +#pragma intrinsic( _InterlockedExchangeAdd ) +#pragma intrinsic( _InterlockedExchangeAdd64 ) +//#pragma intrinsic( _InterlockedAnd ) +//#pragma intrinsic( _InterlockedOr ) +//#pragma intrinsic( _InterlockedXor ) +//#pragma intrinsic( _InterlockedAnd64 ) +//#pragma intrinsic( _InterlockedOr64 ) +//#pragma intrinsic( _InterlockedXor64 ) +#pragma intrinsic( _interlockedbittestandset ) +#if _MSC_VER >= 1600 +# pragma 
intrinsic( _InterlockedCompareExchange8 ) +# pragma intrinsic( _InterlockedExchange8 ) +# pragma intrinsic( _InterlockedExchange16 ) +#endif + +//@cond +namespace cds { namespace cxx11_atomics { + namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace vc { CDS_CXX11_INLINE_NAMESPACE namespace amd64 { + + static inline void fence_before( memory_order order ) CDS_NOEXCEPT + { + switch(order) { + case memory_order_relaxed: + case memory_order_acquire: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + } + } + + static inline void fence_after( memory_order order ) CDS_NOEXCEPT + { + switch(order) { + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_relaxed: + case memory_order_consume: + case memory_order_release: + break; + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + } + } + + static inline void full_fence() + { + // MS VC does not support inline assembler in C code. + // So, we use InterlockedExchange for full fence instead of mfence inst + long t; + _InterlockedExchange( &t, 0 ); + } + + static inline void fence_after_load(memory_order order) CDS_NOEXCEPT + { + switch(order) { + case memory_order_relaxed: + case memory_order_release: + break; + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_consume: + break; + case memory_order_seq_cst: + full_fence(); + break; + default:; + } + } + + //----------------------------------------------------------------------------- + // fences + //----------------------------------------------------------------------------- + static inline void thread_fence(memory_order order) CDS_NOEXCEPT + { + switch(order) + { + case memory_order_relaxed: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_seq_cst: + full_fence(); + break; + default:; + } + } + + static inline void signal_fence(memory_order order) CDS_NOEXCEPT + { + // C++11: 29.8.8: only compiler optimization, no hardware instructions + switch(order) + { + case memory_order_relaxed: + break; + case memory_order_consume: + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + default:; + } + } + + //----------------------------------------------------------------------------- + // atomic flag primitives + //----------------------------------------------------------------------------- + + typedef unsigned char atomic_flag_type; + static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order /*order*/ ) CDS_NOEXCEPT + { + return _interlockedbittestandset( (long volatile *) pFlag, 0 ) != 0; + } + + static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) CDS_NOEXCEPT + { + assert( order != memory_order_acquire + && order != memory_order_acq_rel + ); + + fence_before( order ); + *pFlag = 0; + fence_after( order ); + } + + //----------------------------------------------------------------------------- + // 8bit primitives + //----------------------------------------------------------------------------- + +#if _MSC_VER >= 1600 +# pragma warning(push) + // Disable warning C4800: 'char' : forcing value to bool 'true' or 'false' 
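// -----------------------------------------------------------------------------
// Illustrative aside, not part of the original patch: a minimal test-and-set
// spin lock built from the atomic_flag_tas()/atomic_flag_clear() primitives
// above.  The example_spin_lock/example_spin_unlock names are hypothetical.
static inline void example_spin_lock( atomic_flag_type volatile * pFlag )
{
    // atomic_flag_tas() returns the previous value of the flag: 'true' means
    // the lock was already held, so keep retrying until it is observed free
    while ( atomic_flag_tas( pFlag, memory_order_acquire ))
        ;
}

static inline void example_spin_unlock( atomic_flag_type volatile * pFlag )
{
    // a release clear publishes the writes made inside the critical section;
    // acquire or acq_rel here would trip the assertion in atomic_flag_clear()
    atomic_flag_clear( pFlag, memory_order_release );
}
// -----------------------------------------------------------------------------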
(performance warning) +# pragma warning( disable: 4800 ) +#endif + template + static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + +# if _MSC_VER >= 1600 + // VC 2010 + + T prev = expected; + expected = (T) _InterlockedCompareExchange8( (char volatile*) pDest, (char) desired, (char) expected ); + return expected == prev; +# else + // VC 2008 + unsigned int * pnDest = (unsigned int *)( ((unsigned __int64) pDest) & ~(unsigned __int64(3)) ); + unsigned int nOffset = ((unsigned __int64) pDest) & 3; + unsigned int nExpected; + unsigned int nDesired; + + for (;;) { + nExpected = + nDesired = *pnDest; + memcpy( reinterpret_cast(&nExpected) + nOffset, &expected, sizeof(T)); + memcpy( reinterpret_cast(&nDesired) + nOffset, &desired, sizeof(T)); + + unsigned int nPrev = (unsigned int) _InterlockedCompareExchange( (long *) pnDest, (long) nDesired, (long) nExpected ); + if ( nPrev == nExpected ) + return true; + T nByte; + memcpy( &nByte, reinterpret_cast(&nPrev) + nOffset, sizeof(T)); + if ( nByte != expected ) { + expected = nByte; + return false; + } + } +# endif + } +#if _MSC_VER >= 1600 +# pragma warning(pop) +#endif + + template + static inline bool cas8_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas8_strong( pDest, expected, desired, mo_success, mo_fail ); + } + +#if _MSC_VER >= 1600 +# pragma warning(push) + // Disable warning C4800: 'char' : forcing value to bool 'true' or 'false' (performance warning) +# pragma warning( disable: 4800 ) +#endif + template + static inline T exchange8( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + +# if _MSC_VER >= 1600 + CDS_UNUSED(order); + return (T) _InterlockedExchange8( (char volatile *) pDest, (char) v ); +# else + T expected = *pDest; + do {} while ( !cas8_strong( pDest, expected, v, order, memory_order_relaxed )); + return expected; +# endif + } +#if _MSC_VER >= 1600 +# pragma warning(pop) +#endif + + template + static inline void store8( T volatile * pDest, T src, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange8( pDest, src, order ); + } + } + + template + static inline T load8( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + + //----------------------------------------------------------------------------- + // 16bit primitives + //----------------------------------------------------------------------------- + + template + static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 2 )); + + // _InterlockedCompareExchange behave as 
read-write memory barriers + T prev = expected; + expected = (T) _InterlockedCompareExchange16( (short *) pDest, (short) desired, (short) expected ); + return expected == prev; + } + + template + static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas16_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T exchange16( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 2 )); + +# if _MSC_VER >= 1600 + order; + return (T) _InterlockedExchange16( (short volatile *) pDest, (short) v ); +# else + T expected = *pDest; + do {} while ( !cas16_strong( pDest, expected, v, order, memory_order_relaxed )); + return expected; +# endif + } + + template + static inline void store16( T volatile * pDest, T src, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + assert( cds::details::is_aligned( pDest, 2 )); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange16( pDest, src, order ); + } + } + + template + static inline T load16( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + assert( cds::details::is_aligned( pSrc, 2 )); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + + //----------------------------------------------------------------------------- + // 32bit primitives + //----------------------------------------------------------------------------- + + template + static inline T exchange32( T volatile * pDest, T v, memory_order /*order*/ ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 4 )); + + return (T) _InterlockedExchange( (long *) pDest, (long) v ); + } + + template + static inline void store32( T volatile * pDest, T src, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + assert( cds::details::is_aligned( pDest, 4 )); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange32( pDest, src, order ); + } + } + + template + static inline T load32( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + assert( cds::details::is_aligned( pSrc, 4 )); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + + template + static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 4 )); + + // 
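// -----------------------------------------------------------------------------
// Illustrative aside, not part of the original patch: the release/acquire
// message-passing pattern that the order assertions in store32()/load32() above
// are written for.  The two cells and the example_* helpers are hypothetical.
static unsigned int          g_nExamplePayload = 0;
static unsigned int volatile g_nExampleReady   = 0;

static inline void example_publish( unsigned int nValue )
{
    g_nExamplePayload = nValue;                               // plain store of the data
    store32( &g_nExampleReady, 1u, memory_order_release );    // then publish the flag
}

static inline bool example_try_consume( unsigned int& nValue )
{
    if ( load32( &g_nExampleReady, memory_order_acquire ) == 0 )   // pairs with the release store
        return false;
    nValue = g_nExamplePayload;     // the acquire load guarantees the payload is visible
    return true;
}
// -----------------------------------------------------------------------------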
_InterlockedCompareExchange behave as read-write memory barriers + T prev = expected; + expected = (T) _InterlockedCompareExchange( (long *) pDest, (long) desired, (long) expected ); + return expected == prev; + } + + template + static inline bool cas32_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas32_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + // fetch_xxx may be emulated via cas32 + // If the platform has special fetch_xxx instruction + // then it should define CDS_ATOMIC_fetch32_xxx_defined macro + +# define CDS_ATOMIC_fetch32_add_defined + template + static inline T fetch32_add( T volatile * pDest, T v, memory_order /*order*/) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 4 )); + + // _InterlockedExchangeAdd behave as read-write memory barriers + return (T) _InterlockedExchangeAdd( (long *) pDest, (long) v ); + } + + //----------------------------------------------------------------------------- + // 64bit primitives + //----------------------------------------------------------------------------- + + template + static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 8 )); + + // _InterlockedCompareExchange behave as read-write memory barriers + T prev = expected; + expected = (T) _InterlockedCompareExchange64( (__int64 *) pDest, (__int64) desired, (__int64) expected ); + return expected == prev; + } + + template + static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas64_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T load64( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + assert( cds::details::is_aligned( pSrc, 8 )); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + + + template + static inline T exchange64( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + + T cur = load64( pDest, memory_order_relaxed ); + do { + } while (!cas64_weak( pDest, cur, v, order, memory_order_relaxed )); + return cur; + } + + template + static inline void store64( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + assert( cds::details::is_aligned( pDest, 8 )); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = val; + } + else { + exchange64( pDest, val, order ); + } + } + +# define CDS_ATOMIC_fetch64_add_defined + template + static inline T fetch64_add( T volatile * pDest, T v, memory_order /*order*/) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 8 )); + + // _InterlockedExchangeAdd64 behave as read-write memory barriers + return (T) 
_InterlockedExchangeAdd64( (__int64 *) pDest, (__int64) v ); + } + + //----------------------------------------------------------------------------- + // pointer primitives + //----------------------------------------------------------------------------- + + template + static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order /*order*/ ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + return (T *) _InterlockedExchangePointer( (void * volatile *) pDest, reinterpret_cast(v) ); + } + + template + static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange_ptr( pDest, src, order ); + } + } + + template + static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + + T * v = *pSrc; + fence_after_load( order ); + return v; + } + + template + static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + + // _InterlockedCompareExchangePointer behave as read-write memory barriers + T * prev = expected; + expected = (T *) _InterlockedCompareExchangePointer( (void * volatile *) pDest, (void *) desired, (void *) expected ); + return expected == prev; + } + + template + static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + }} // namespace vc::amd64 + +#ifndef CDS_CXX11_INLINE_NAMESPACE_SUPPORT + using namespace vc::amd64; +#endif + } // namespace platform +}} // namespace cds::cxx11_atomics +//@endcond + +#endif // #ifndef __CDS_COMPILER_VC_AMD64_CXX11_ATOMIC_H diff --git a/cds/compiler/vc/compiler_barriers.h b/cds/compiler/vc/compiler_barriers.h new file mode 100644 index 00000000..cdeb2002 --- /dev/null +++ b/cds/compiler/vc/compiler_barriers.h @@ -0,0 +1,29 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_VC_COMPILER_BARRIERS_H +#define __CDS_COMPILER_VC_COMPILER_BARRIERS_H + +#if CDS_COMPILER_VERSION < 1700 + // VC++ up to vc10 + +# include + +# pragma intrinsic(_ReadWriteBarrier) +# pragma intrinsic(_ReadBarrier) +# pragma intrinsic(_WriteBarrier) + +# define CDS_COMPILER_RW_BARRIER _ReadWriteBarrier() +# define CDS_COMPILER_R_BARRIER _ReadBarrier() +# define CDS_COMPILER_W_BARRIER _WriteBarrier() + +#else + // MS VC11+ +# include + +# define CDS_COMPILER_RW_BARRIER std::atomic_thread_fence( std::memory_order_acq_rel ) +# define CDS_COMPILER_R_BARRIER CDS_COMPILER_RW_BARRIER +# define CDS_COMPILER_W_BARRIER CDS_COMPILER_RW_BARRIER + +#endif + +#endif // #ifndef __CDS_COMPILER_VC_COMPILER_BARRIERS_H diff --git a/cds/compiler/vc/defs.h b/cds/compiler/vc/defs.h new file mode 100644 index 00000000..e24d30dd --- /dev/null +++ b/cds/compiler/vc/defs.h @@ 
-0,0 +1,196 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_VC_DEFS_H +#define __CDS_COMPILER_VC_DEFS_H +//@cond + +// Compiler version +#define CDS_COMPILER_VERSION _MSC_VER + +// Compiler name +// Supported compilers: MS VC 2008, 2010, 2012, 2013 +// C++ compiler versions: +#define CDS_COMPILER_MSVC9 1500 // 2008 vc9 +#define CDS_COMPILER_MSVC10 1600 // 2010 vc10 +#define CDS_COMPILER_MSVC11 1700 // 2012 vc11 +#define CDS_COMPILER_MSVC12 1800 // 2013 vc12 + +#if _MSC_VER == 1500 +# define CDS_COMPILER__NAME "MS Visual C++ 2008" +# define CDS_COMPILER__NICK "vc9" +#elif _MSC_VER == 1600 +# define CDS_COMPILER__NAME "MS Visual C++ 2010" +# define CDS_COMPILER__NICK "vc10" +#elif _MSC_VER == 1700 +# define CDS_COMPILER__NAME "MS Visual C++ 2012" +# define CDS_COMPILER__NICK "vc11" +#elif _MSC_VER == 1800 +# define CDS_COMPILER__NAME "MS Visual C++ 2013" +# define CDS_COMPILER__NICK "vc12" +#else +# define CDS_COMPILER__NAME "MS Visual C++" +# define CDS_COMPILER__NICK "msvc" +#endif + +// OS interface +#define CDS_OS_INTERFACE CDS_OSI_WINDOWS + +// OS name +#if defined(_WIN64) +# define CDS_OS_TYPE CDS_OS_WIN64 +# define CDS_OS__NAME "Win64" +# define CDS_OS__NICK "Win64" +#elif defined(_WIN32) +# define CDS_OS_TYPE CDS_OS_WIN32 +# define CDS_OS__NAME "Win32" +# define CDS_OS__NICK "Win32" +#endif + +// Processor architecture +#ifdef _M_IX86 +# define CDS_BUILD_BITS 32 +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_X86 +# define CDS_PROCESSOR__NAME "Intel x86" +# define CDS_PROCESSOR__NICK "x86" +#elif _M_X64 +# define CDS_BUILD_BITS 64 +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_AMD64 +# define CDS_PROCESSOR__NAME "AMD64" +# define CDS_PROCESSOR__NICK "amd64" +#else +# define CDS_BUILD_BITS -1 +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_UNKNOWN +# define CDS_PROCESSOR__NAME "<>" +# error Microsoft Visual C++ compiler is supported for x86 only +#endif + + +#define __attribute__( _x ) + +#define CDS_STDCALL __stdcall + +#ifdef CDS_BUILD_LIB +# define CDS_EXPORT_API __declspec(dllexport) +#else +# define CDS_EXPORT_API __declspec(dllimport) +#endif + +#define alignof __alignof + +#if CDS_COMPILER_VERSION < 1600 +# include +# define static_assert(_expr, _msg) BOOST_STATIC_ASSERT((_expr)) +#endif + +// Memory leaks detection (debug build only) +#ifdef _DEBUG +# define _CRTDBG_MAP_ALLOC +# define _CRTDBG_MAPALLOC +# include +# include +# define CDS_MSVC_MEMORY_LEAKS_DETECTING_ENABLED +#endif + +// constexpr is not yet supported +#define CDS_CONSTEXPR +#define CDS_CONSTEXPR_CONST const + +// noexcept is not yet supported +//#define CDS_NOEXCEPT_SUPPORT noexcept +//#define CDS_NOEXCEPT_SUPPORT_(expr) noexcept(expr) +#define CDS_NOEXCEPT_SUPPORT +#define CDS_NOEXCEPT_SUPPORT_(expr) + +// C++11 atomic support +// MSVC 2012 has implementation but all load/store is based on CAS +// that is quite inefficient. 
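// -----------------------------------------------------------------------------
// Illustrative aside, not part of the original patch: how the portability macros
// defined above are meant to be used.  With this compiler CDS_CONSTEXPR_CONST
// degrades to plain 'const', CDS_NOEXCEPT_SUPPORT expands to nothing, and on
// VC 2008 static_assert falls back to BOOST_STATIC_ASSERT (the message argument
// is dropped), so the same source builds on every supported MSVC level.  The
// example names are hypothetical.
static_assert( sizeof(void *) == sizeof(size_t), "unexpected pointer size" );

CDS_CONSTEXPR_CONST int c_nExampleCacheLineSize = 64;

static inline int example_identity( int n ) CDS_NOEXCEPT_SUPPORT
{
    return n;
}
// -----------------------------------------------------------------------------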
+// So for VC 2012 we use internal implementation for atomics +#if CDS_COMPILER_VERSION >= 1800 +# define CDS_CXX11_ATOMIC_SUPPORT 1 +#endif + + +// Lambda (VC 10 +) +#if CDS_COMPILER_VERSION >= 1600 +# define CDS_CXX11_LAMBDA_SUPPORT +# if CDS_COMPILER_VERSION < 1700 +# define CDS_BUG_STATIC_MEMBER_IN_LAMBDA +# endif +#endif + +// RValue (VC 10+) +#if CDS_COMPILER_VERSION >= 1600 +# define CDS_RVALUE_SUPPORT +# define CDS_MOVE_SEMANTICS_SUPPORT +#endif + +// Default template arguments for function templates (VC12+) +#if CDS_COMPILER_VERSION >= 1800 +# define CDS_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS_SUPPORT +#endif + +// C++11 delete definition ( function declaration = delete) +#if CDS_COMPILER_VERSION >= 1800 +# define CDS_CXX11_DELETE_DEFINITION_SUPPORT +#endif + +// C++11 explicitly-defaulted function (= default) [std 8.4.2 [dcl.fct.def.default]] +#if CDS_COMPILER_VERSION >= 1800 +# define CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT +#endif + +// Variadic template support (VC12+) +#if CDS_COMPILER_VERSION >= 1800 +# define CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT 1 +#endif + +// C++11 template alias +#if CDS_COMPILER_VERSION >= 1800 +# define CDS_CXX11_TEMPLATE_ALIAS_SUPPORT +#endif + +// C++11 inline namespace +//#define CDS_CXX11_INLINE_NAMESPACE_SUPPORT + +// Explicit conversion operator (VC12+) +#if CDS_COMPILER_VERSION >= 1800 +# define CDS_CXX11_EXPLICIT_CONVERSION_OPERATOR_SUPPORT +#endif + + +// +#if CDS_COMPILER_VERSION == 1500 + // MS VC 2008 has no +# include +#else +# include +#endif + +// Thread support library (thread, mutex, condition variable) +#if CDS_COMPILER_VERSION >= 1700 + // MS VC 11+ +# define CDS_CXX11_STDLIB_THREAD +# define CDS_CXX11_STDLIB_MUTEX +# define CDS_CXX11_STDLIB_CONDITION_VARIABLE +# define CDS_CXX11_STDLIB_CHRONO +#endif + +// Full SFINAE support +//#if CDS_COMPILER_VERSION >= ???? 
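// -----------------------------------------------------------------------------
// Illustrative aside, not part of the original patch: the feature-test macros
// above are meant to select an implementation per compiler level, e.g. a single
// variadic overload on VC12+ versus hand-written arities on older MSVC.  The
// example_make() helper is a hypothetical name.
#ifdef CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT
    template <typename Q, typename... Args>
    static inline Q example_make( Args const&... args )
    {
        return Q( args... );
    }
#else
    template <typename Q, typename A1>
    static inline Q example_make( A1 const& a1 )
    {
        return Q( a1 );
    }
    template <typename Q, typename A1, typename A2>
    static inline Q example_make( A1 const& a1, A2 const& a2 )
    {
        return Q( a1, a2 );
    }
#endif
// -----------------------------------------------------------------------------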
+//# define CDS_CXX11_SFINAE +//#endif + + +// ************************************************* +// Alignment macro + +// VC 2005 generates error C2719 "formal parameter with __declspec(align('#')) won't be aligned" +// for function's formal parameter with align declspec +#define CDS_TYPE_ALIGNMENT(n) __declspec( align(n) ) +#define CDS_DATA_ALIGNMENT(n) __declspec( align(n) ) +#define CDS_CLASS_ALIGNMENT(n) __declspec( align(n) ) + +#include + +//@endcond +#endif // #ifndef __CDS_COMPILER_VC_DEFS_H diff --git a/cds/compiler/vc/x86/backoff.h b/cds/compiler/vc/x86/backoff.h new file mode 100644 index 00000000..24dc75df --- /dev/null +++ b/cds/compiler/vc/x86/backoff.h @@ -0,0 +1,35 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_VC_X86_BACKOFF_H +#define __CDS_COMPILER_VC_X86_BACKOFF_H + +//@cond none + +namespace cds { namespace backoff { + namespace vc { namespace x86 { + +# define CDS_backoff_pause_defined + static inline void backoff_pause( unsigned int nLoop = 0x000003FF ) + { + __asm { + and ecx, nLoop; + cmovz ecx, nLoop; + rep nop; + } + } + +# define CDS_backoff_nop_defined + static inline void backoff_nop() + { + __nop(); + } + + }} // namespace vc::x86 + + namespace platform { + using namespace vc::x86; + } +}} // namespace cds::backoff + +//@endcond +#endif // #ifndef __CDS_COMPILER_VC_X86_BACKOFF_H diff --git a/cds/compiler/vc/x86/bitop.h b/cds/compiler/vc/x86/bitop.h new file mode 100644 index 00000000..f2772005 --- /dev/null +++ b/cds/compiler/vc/x86/bitop.h @@ -0,0 +1,83 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_VC_X86_BITOP_H +#define __CDS_COMPILER_VC_X86_BITOP_H + +#include +#pragma intrinsic(_BitScanReverse) +#pragma intrinsic(_BitScanForward) + +//@cond none +namespace cds { + namespace bitop { namespace platform { namespace vc { namespace x86 { + // MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0 +# define cds_bitop_msb32_DEFINED + static inline int msb32( atomic32u_t nArg ) + { + unsigned long nIndex; + if ( _BitScanReverse( &nIndex, nArg )) + return (int) nIndex + 1; + return 0; + } + +# define cds_bitop_msb32nz_DEFINED + static inline int msb32nz( atomic32u_t nArg ) + { + assert( nArg != 0 ); + unsigned long nIndex; + _BitScanReverse( &nIndex, nArg ); + return (int) nIndex; + } + + // LSB - return index (1..32) of least significant bit in nArg. 
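// -----------------------------------------------------------------------------
// Illustrative aside, not part of the original patch: a typical consumer of the
// msb32() primitive above - rounding a bucket count up to the next power of two.
// The example_ceil2() name is hypothetical; the result is unspecified for
// n > 0x80000000 because the doubled value would overflow 32 bits.
static inline atomic32u_t example_ceil2( atomic32u_t n )
{
    if ( n <= 1 )
        return 1;
    // msb32() returns the 1-based index of the highest set bit, so shifting by
    // (index - 1) yields the largest power of two not greater than n
    int nBit = msb32( n );
    atomic32u_t nPow = atomic32u_t(1) << ( nBit - 1 );
    return nPow == n ? n : ( nPow << 1 );
}
// -----------------------------------------------------------------------------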
If nArg == 0 return -1U +# define cds_bitop_lsb32_DEFINED + static inline int lsb32( atomic32u_t nArg ) + { + unsigned long nIndex; + if ( _BitScanForward( &nIndex, nArg )) + return (int) nIndex + 1; + return 0; + } + +# define cds_bitop_lsb32nz_DEFINED + static inline int lsb32nz( atomic32u_t nArg ) + { + assert( nArg != 0 ); + unsigned long nIndex; + _BitScanForward( &nIndex, nArg ); + return (int) nIndex; + } + + // bswap - Reverses the byte order of a 32-bit word +# define cds_bitop_bswap32_DEFINED + static inline atomic32u_t bswap32( atomic32u_t nArg ) + { + __asm { + mov eax, nArg; + bswap eax; + } + } + +# define cds_bitop_complement32_DEFINED + static inline bool complement32( atomic32u_t * pArg, unsigned int nBit ) + { + return _bittestandcomplement( reinterpret_cast( pArg ), nBit ) != 0; + } + +# define cds_bitop_complement64_DEFINED + static inline bool complement64( atomic64u_t * pArg, unsigned int nBit ) + { + if ( nBit < 32 ) + return _bittestandcomplement( reinterpret_cast( pArg ), nBit ) != 0; + else + return _bittestandcomplement( reinterpret_cast( pArg ) + 1, nBit - 32 ) != 0; + } + + }} // namespace vc::x86 + + using namespace vc::x86; + +}}} // namespace cds::bitop::platform +//@endcond + +#endif // #ifndef __CDS_COMPILER_VC_X86_BITOP_H diff --git a/cds/compiler/vc/x86/cxx11_atomic.h b/cds/compiler/vc/x86/cxx11_atomic.h new file mode 100644 index 00000000..4670a3b1 --- /dev/null +++ b/cds/compiler/vc/x86/cxx11_atomic.h @@ -0,0 +1,556 @@ +//$$CDS-header$$ + +#ifndef __CDS_COMPILER_VC_X86_CXX11_ATOMIC_H +#define __CDS_COMPILER_VC_X86_CXX11_ATOMIC_H + +#include +#include // for 64bit atomic load/store +#include + +#pragma intrinsic( _InterlockedIncrement ) +#pragma intrinsic( _InterlockedDecrement ) +#pragma intrinsic( _InterlockedCompareExchange ) +//#pragma intrinsic( _InterlockedCompareExchangePointer ) // On the x86 architecture, _InterlockedCompareExchangePointer is a macro that calls _InterlockedCompareExchange +#pragma intrinsic( _InterlockedCompareExchange16 ) +#pragma intrinsic( _InterlockedCompareExchange64 ) +#pragma intrinsic( _InterlockedExchange ) +//#pragma intrinsic( _InterlockedExchangePointer ) // On the x86 architecture, _InterlockedExchangePointer is a macro that calls _InterlockedExchange +#pragma intrinsic( _InterlockedExchangeAdd ) +#pragma intrinsic( _InterlockedXor ) +#pragma intrinsic( _InterlockedOr ) +#pragma intrinsic( _InterlockedAnd ) +#pragma intrinsic( _interlockedbittestandset ) +#if _MSC_VER >= 1600 +# pragma intrinsic( _InterlockedCompareExchange8 ) +# pragma intrinsic( _InterlockedExchange8 ) +# pragma intrinsic( _InterlockedExchange16 ) +#endif + +//@cond +namespace cds { namespace cxx11_atomics { + namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace vc { CDS_CXX11_INLINE_NAMESPACE namespace x86 { + + static inline void fence_before( memory_order order ) CDS_NOEXCEPT + { + switch(order) { + case memory_order_relaxed: + case memory_order_acquire: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + } + } + + static inline void fence_after( memory_order order ) CDS_NOEXCEPT + { + switch(order) { + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_relaxed: + case memory_order_consume: + case memory_order_release: + break; + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + } + } + + + static inline void 
fence_after_load(memory_order order) CDS_NOEXCEPT + { + switch(order) { + case memory_order_relaxed: + case memory_order_release: + break; + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_consume: + break; + case memory_order_seq_cst: + __asm { mfence }; + break; + default:; + } + } + + //----------------------------------------------------------------------------- + // fences + //----------------------------------------------------------------------------- + static inline void thread_fence(memory_order order) CDS_NOEXCEPT + { + switch(order) + { + case memory_order_relaxed: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_seq_cst: + __asm { mfence }; + break; + default:; + } + } + + static inline void signal_fence(memory_order order) CDS_NOEXCEPT + { + // C++11: 29.8.8: only compiler optimization, no hardware instructions + switch(order) + { + case memory_order_relaxed: + break; + case memory_order_consume: + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + default:; + } + } + + //----------------------------------------------------------------------------- + // atomic flag primitives + //----------------------------------------------------------------------------- + + typedef unsigned char atomic_flag_type; + static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order /*order*/ ) CDS_NOEXCEPT + { + return _interlockedbittestandset( (long volatile *) pFlag, 0 ) != 0; + } + + static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) CDS_NOEXCEPT + { + assert( order != memory_order_acquire + && order != memory_order_acq_rel + ); + + fence_before( order ); + *pFlag = 0; + fence_after( order ); + } + + + //----------------------------------------------------------------------------- + // 8bit primitives + //----------------------------------------------------------------------------- + +#if _MSC_VER >= 1600 +# pragma warning(push) + // Disable warning C4800: 'char' : forcing value to bool 'true' or 'false' (performance warning) +# pragma warning( disable: 4800 ) +#endif + template + static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 1, "Illegal operand size" ); + +# if _MSC_VER >= 1600 + T prev = expected; + expected = (T) _InterlockedCompareExchange8( reinterpret_cast(pDest), (char) desired, (char) expected ); + return expected == prev; +# else + bool bRet = false; + __asm { + mov ecx, pDest; + mov edx, expected; + mov al, byte ptr [edx]; + mov ah, desired; + lock cmpxchg byte ptr [ecx], ah; + mov byte ptr [edx], al; + setz bRet; + } + return bRet; +# endif + } +#if _MSC_VER >= 1600 +# pragma warning(pop) +#endif + + template + static inline bool cas8_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas8_strong( pDest, expected, desired, mo_success, mo_fail ); + } + +#if _MSC_VER >= 1600 +# pragma warning(push) + // Disable warning C4800: 'char' : forcing value to bool 'true' or 'false' (performance warning) +# pragma warning( disable: 4800 ) +#endif + template + static inline T exchange8( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT 
+ { + static_assert( sizeof(T) == 1, "Illegal operand size" ); + +# if _MSC_VER >= 1600 + return (T) _InterlockedExchange8( reinterpret_cast(pDest), (char) v ); +# else + __asm { + mov al, v; + mov ecx, pDest; + lock xchg byte ptr [ecx], al; + } +# endif + } +#if _MSC_VER >= 1600 +# pragma warning(pop) +#endif + + template + static inline void store8( T volatile * pDest, T src, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 1, "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange8( pDest, src, order ); + } + } + + template + static inline T load8( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 1, "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + + //----------------------------------------------------------------------------- + // 16bit primitives + //----------------------------------------------------------------------------- + + template + static inline T exchange16( T volatile * pDest, T v, memory_order /*order*/ ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal operand size" ); + assert( cds::details::is_aligned( pDest, 2 )); + +# if _MSC_VER >= 1600 + return (T) _InterlockedExchange16( (short volatile *) pDest, (short) v ); +# else + __asm { + mov ax, v; + mov ecx, pDest; + lock xchg word ptr [ecx], ax; + } +# endif + } + + template + static inline void store16( T volatile * pDest, T src, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + assert( cds::details::is_aligned( pDest, 2 )); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange16( pDest, src, order ); + } + } + + template + static inline T load16( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + assert( cds::details::is_aligned( pSrc, 2 )); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + + template + static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 2, "Illegal operand size" ); + assert( cds::details::is_aligned( pDest, 2 )); + + // _InterlockedCompareExchange behave as read-write memory barriers + T prev = expected; + expected = (T) _InterlockedCompareExchange16( (short *) pDest, (short) desired, (short) expected ); + return expected == prev; + } + + template + static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas16_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + //----------------------------------------------------------------------------- + // 32bit primitives + 
//----------------------------------------------------------------------------- + + template + static inline T exchange32( T volatile * pDest, T v, memory_order /*order*/ ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal operand size" ); + assert( cds::details::is_aligned( pDest, 4 )); + + return (T) _InterlockedExchange( (long *) pDest, (long) v ); + } + + template + static inline void store32( T volatile * pDest, T src, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + assert( cds::details::is_aligned( pDest, 4 )); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange32( pDest, src, order ); + } + } + + template + static inline T load32( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + assert( cds::details::is_aligned( pSrc, 4 )); + + T v( *pSrc ); + fence_after_load( order ); + return v; + } + + template + static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal operand size" ); + assert( cds::details::is_aligned( pDest, 4 )); + + // _InterlockedCompareExchange behave as read-write memory barriers + T prev = expected; + expected = (T) _InterlockedCompareExchange( (long *) pDest, (long) desired, (long) expected ); + return expected == prev; + } + + template + static inline bool cas32_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas32_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + // fetch_xxx may be emulated via cas32 + // If the platform has special fetch_xxx instruction + // then it should define CDS_ATOMIC_fetch32_xxx_defined macro + +# define CDS_ATOMIC_fetch32_add_defined + template + static inline T fetch32_add( T volatile * pDest, T v, memory_order /*order*/) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 4, "Illegal operand size" ); + assert( cds::details::is_aligned( pDest, 4 )); + + // _InterlockedExchangeAdd behave as read-write memory barriers + return (T) _InterlockedExchangeAdd( (long *) pDest, (long) v ); + } + + //----------------------------------------------------------------------------- + // 64bit primitives + //----------------------------------------------------------------------------- + + template + static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal operand size" ); + assert( cds::details::is_aligned( pDest, 8 )); + + // _InterlockedCompareExchange behave as read-write memory barriers + T prev = expected; + expected = (T) _InterlockedCompareExchange64( (__int64 *) pDest, (__int64) desired, (__int64) expected ); + return expected == prev; + } + + template + static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas64_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T load64( T 
volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + assert( cds::details::is_aligned( pSrc, 8 )); + + // Atomically loads 64bit value by SSE intrinsics + __m128i volatile v = _mm_loadl_epi64( (__m128i const *) pSrc ); + fence_after_load( order ); + return (T) v.m128i_i64[0]; + } + + + template + static inline T exchange64( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal operand size" ); + + T cur = load64( pDest, memory_order_relaxed ); + do { + } while (!cas64_weak( pDest, cur, v, order, memory_order_relaxed )); + return cur; + } + + template + static inline void store64( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T) == 8, "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + assert( cds::details::is_aligned( pDest, 8 )); + + if ( order != memory_order_seq_cst ) { + __m128i v; + v.m128i_i64[0] = val; + fence_before( order ); + _mm_storel_epi64( (__m128i *) pDest, v ); + } + else { + exchange64( pDest, val, order ); + } + } + + + //----------------------------------------------------------------------------- + // pointer primitives + //----------------------------------------------------------------------------- + + template + static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal operand size" ); + return (T *) _InterlockedExchange( (long volatile *) pDest, (uintptr_t) v ); + //return (T *) _InterlockedExchangePointer( (void * volatile *) pDest, reinterpret_cast(v) ); + } + + template + static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange_ptr( pDest, src, order ); + } + } + + template + static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + + T * v = *pSrc; + fence_after_load( order ); + return v; + } + + template + static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal operand size" ); + + // _InterlockedCompareExchangePointer behave as read-write memory barriers + T * prev = expected; + expected = (T *) _InterlockedCompareExchange( (long volatile *) pDest, (uintptr_t) desired, (uintptr_t) prev ); + return expected == prev; + } + + template + static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT + { + return cas_ptr_strong( pDest, expected, 
desired, mo_success, mo_fail ); + } + }} // namespace vc::x86 + +#ifndef CDS_CXX11_INLINE_NAMESPACE_SUPPORT + using namespace vc::x86; +#endif + } // namespace platform +}} // namespace cds::cxx11_atomics +//@endcond + +#endif // #ifndef __CDS_COMPILER_VC_X86_CXX11_ATOMIC_H diff --git a/cds/container/base.h b/cds/container/base.h new file mode 100644 index 00000000..bd6ed3e0 --- /dev/null +++ b/cds/container/base.h @@ -0,0 +1,60 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_BASE_H +#define __CDS_CONTAINER_BASE_H + +#include +#include + +namespace cds { + +/// Standard (non-intrusive) containers +/** + @ingroup cds_nonintrusive_containers + This namespace contains implementations of non-intrusive (std-like) lock-free containers. +*/ +namespace container { + + /// Common options for non-intrusive containers + /** @ingroup cds_nonintrusive_helper + This namespace contains options for non-intrusive containers that is, in general, the same as for the intrusive containers. + It imports all definitions from cds::opt and cds::intrusive::opt namespaces + */ + namespace opt { + using namespace cds::intrusive::opt; + } // namespace opt + + /// @defgroup cds_nonintrusive_containers Non-intrusive containers + /** @defgroup cds_nonintrusive_helper Helper structs for non-intrusive containers + @ingroup cds_nonintrusive_containers + */ + + /** @defgroup cds_nonintrusive_stack Stack + @ingroup cds_nonintrusive_containers + */ + /** @defgroup cds_nonintrusive_queue Queue + @ingroup cds_nonintrusive_containers + */ + /** @defgroup cds_nonintrusive_deque Deque + @ingroup cds_nonintrusive_containers + */ + /** @defgroup cds_nonintrusive_priority_queue Priority queue + @ingroup cds_nonintrusive_containers + */ + /** @defgroup cds_nonintrusive_map Map + @ingroup cds_nonintrusive_containers + */ + /** @defgroup cds_nonintrusive_set Set + @ingroup cds_nonintrusive_containers + */ + /** @defgroup cds_nonintrusive_list List + @ingroup cds_nonintrusive_containers + */ + /** @defgroup cds_nonintrusive_tree Tree + @ingroup cds_nonintrusive_containers + */ + +} // namespace container +} // namespace cds + +#endif // #ifndef __CDS_CONTAINER_BASE_H diff --git a/cds/container/basket_queue.h b/cds/container/basket_queue.h new file mode 100644 index 00000000..991b3cb3 --- /dev/null +++ b/cds/container/basket_queue.h @@ -0,0 +1,393 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_BASKET_QUEUE_H +#define __CDS_CONTAINER_BASKET_QUEUE_H + +#include +#include +#include +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + template + struct make_basket_queue + { + typedef GC gc; + typedef T value_type; + + struct default_options { + typedef cds::backoff::empty back_off; + typedef CDS_DEFAULT_ALLOCATOR allocator; + typedef atomicity::empty_item_counter item_counter; + typedef intrusive::basket_queue::dummy_stat stat; + typedef opt::v::relaxed_ordering memory_model; + enum { alignment = opt::cache_line_alignment }; + }; + + typedef typename opt::make_options< + typename cds::opt::find_type_traits< default_options, CDS_OPTIONS7 >::type + ,CDS_OPTIONS7 + >::type options; + + struct node_type: public intrusive::basket_queue::node< gc > + { + value_type m_value; + + node_type( const value_type& val ) + : m_value( val ) + {} +# ifdef CDS_EMPLACE_SUPPORT + template + node_type( Args&&... args ) + : m_value( std::forward(args)...) 
+ {} +# else + node_type() + {} +# endif + }; + + typedef typename options::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + + typedef intrusive::BasketQueue< gc, + node_type + ,intrusive::opt::hook< + intrusive::basket_queue::base_hook< opt::gc > + > + ,opt::back_off< typename options::back_off > + ,intrusive::opt::disposer< node_deallocator > + ,opt::item_counter< typename options::item_counter > + ,opt::stat< typename options::stat > + ,opt::alignment< options::alignment > + ,opt::memory_model< typename options::memory_model > + > type; + }; + } + //@endcond + + /// Basket lock-free queue (non-intrusive variant) + /** @ingroup cds_nonintrusive_queue + It is non-intrusive version of basket queue algorithm based on intrusive::BasketQueue counterpart. + + \par Source: + [2007] Moshe Hoffman, Ori Shalev, Nir Shavit "The Baskets Queue" + + Key idea + + In the “basket” approach, instead of + the traditional ordered list of nodes, the queue consists of an ordered list of groups + of nodes (logical baskets). The order of nodes in each basket need not be specified, and in + fact, it is easiest to maintain them in LIFO order. The baskets fulfill the following basic + rules: + - Each basket has a time interval in which all its nodes’ enqueue operations overlap. + - The baskets are ordered by the order of their respective time intervals. + - For each basket, its nodes’ dequeue operations occur after its time interval. + - The dequeue operations are performed according to the order of baskets. + + Two properties define the FIFO order of nodes: + - The order of nodes in a basket is not specified. + - The order of nodes in different baskets is the FIFO-order of their respective baskets. + + In algorithms such as the MS-queue or optimistic + queue, threads enqueue items by applying a Compare-and-swap (CAS) operation to the + queue’s tail pointer, and all the threads that fail on a particular CAS operation (and also + the winner of that CAS) overlap in time. In particular, they share the time interval of + the CAS operation itself. Hence, all the threads that fail to CAS on the tail-node of + the queue may be inserted into the same basket. By integrating the basket-mechanism + as the back-off mechanism, the time usually spent on backing-off before trying to link + onto the new tail, can now be utilized to insert the failed operations into the basket, + allowing enqueues to complete sooner. In the meantime, the next successful CAS operations + by enqueues allow new baskets to be formed down the list, and these can be + filled concurrently. Moreover, the failed operations don’t retry their link attempt on the + new tail, lowering the overall contention on it. This leads to a queue + algorithm that unlike all former concurrent queue algorithms requires virtually no tuning + of the backoff mechanisms to reduce contention, making the algorithm an attractive + out-of-the-box queue. + + In order to enqueue, just as in MSQueue, a thread first tries to link the new node to + the last node. If it failed to do so, then another thread has already succeeded. Thus it + tries to insert the new node into the new basket that was created by the winner thread. + To dequeue a node, a thread first reads the head of the queue to obtain the + oldest basket. It may then dequeue any node in the oldest basket. 
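+
+        \par Example
+        A minimal usage sketch (illustrative only; it assumes the Hazard Pointer garbage collector \p gc::HP,
+        default options, and that the library, the GC and the current thread are already initialized;
+        the header paths shown are assumptions):
+        \code
+        #include <cds/gc/hp.h>
+        #include <cds/container/basket_queue.h>
+
+        typedef cds::container::BasketQueue< cds::gc::HP, int > int_basket_queue;
+
+        int_basket_queue q;
+        q.enqueue( 42 );            // same as q.push( 42 )
+
+        int v;
+        if ( q.dequeue( v )) {      // same as q.pop( v )
+            // v == 42
+        }
+        \endcode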
+ + + Template arguments: + - \p GC - garbage collector type: gc::HP, gc::HRC, gc::PTB + - \p T is a type stored in the queue. It should be default-constructible, copy-constructible, assignable type. + - \p Options - options + + Permissible \p Options: + - opt::allocator - allocator (like \p std::allocator). Default is \ref CDS_DEFAULT_ALLOCATOR + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter + - opt::stat - the type to gather internal statistics for debugging and profiling purposes. + Possible option value are: intrusive::basket_queue::stat, intrusive::basket_queue::dummy_stat (the default), + user-provided class that supports intrusive::basket_queue::stat interface. + Generic option intrusive::queue_stat and intrusive::queue_dummy_stat are acceptable too, however, + they will be automatically converted to intrusive::basket_queue::stat and intrusive::basket_queue::dummy_stat + respectively. + - opt::alignment - the alignment for internal queue data. Default is opt::cache_line_alignment + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + */ + template + class BasketQueue: +#ifdef CDS_DOXYGEN_INVOKED + intrusive::BasketQueue< GC, intrusive::basket_queue::node< T >, Options... > +#else + details::make_basket_queue< GC, T, CDS_OPTIONS7 >::type +#endif + { + //@cond + typedef details::make_basket_queue< GC, T, CDS_OPTIONS7 > options; + typedef typename options::type base_class; + //@endcond + + public: + /// Rebind template arguments + template + struct rebind { + typedef BasketQueue< GC2, T2, CDS_OTHER_OPTIONS7> other ; ///< Rebinding result + }; + + public: + typedef T value_type ; ///< Value type stored in the queue + + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename base_class::stat stat ; ///< Internal statistics policy used + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + + protected: + typedef typename options::node_type node_type ; ///< queue node type (derived from intrusive::single_link::node) + + //@cond + typedef typename options::cxx_allocator cxx_allocator; + typedef typename options::node_deallocator node_deallocator; // deallocate node + typedef typename base_class::node_traits node_traits; + //@endcond + + protected: + ///@cond + static node_type * alloc_node() + { + return cxx_allocator().New(); + } + static node_type * alloc_node( const value_type& val ) + { + return cxx_allocator().New( val ); + } +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node_move( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward( args )... 
); + } +# endif + static void free_node( node_type * p ) + { + node_deallocator()( p ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + public: + /// Initializes empty queue + BasketQueue() + {} + + /// Destructor clears the queue + ~BasketQueue() + {} + + /// Returns queue's item count + /** \copydetails cds::intrusive::BasketQueue::size() + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns reference to internal statistics + const stat& statistics() const + { + return base_class::statistics(); + } + + /// Enqueues \p val value into the queue. + /** + The function makes queue node in dynamic memory calling copy constructor for \p val + and then it calls intrusive::BasketQueue::enqueue. + Returns \p true if success, \p false otherwise. + */ + bool enqueue( const value_type& val ) + { + scoped_node_ptr p( alloc_node(val)); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Enqueues \p data to queue using copy functor + /** + \p Func is a functor called to copy value \p data of type \p Type + which may be differ from type \p T stored in the queue. + The functor's interface is: + \code + struct myFunctor { + void operator()(T& dest, Type const& data) + { + // // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + + Requirements The functor \p Func should not throw any exception. + */ + template + bool enqueue( const Type& data, Func f ) + { + scoped_node_ptr p( alloc_node()); + cds::unref(f)( p->m_value, data ); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Enqueues data of type \ref value_type constructed with std::forward(args)... + /** + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr p( alloc_node_move( std::forward(args)...)); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } +# endif + + + /// Dequeues a value using copy functor + /** + \p Func is a functor called to copy dequeued value to \p dest of type \p Type + which may be differ from type \p T stored in the queue. + The functor's interface is: + \code + struct myFunctor { + void operator()(Type& dest, T const& data) + { + // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + + Requirements The functor \p Func should not throw any exception. + */ + template + bool dequeue( Type& dest, Func f ) + { + typename base_class::dequeue_result res; + if ( base_class::do_dequeue( res, true )) { + cds::unref(f)( dest, node_traits::to_value_ptr( *res.pNext )->m_value ); + return true; + } + return false; + } + + /// Dequeues a value from the queue + /** + If queue is not empty, the function returns \p true, \p dest contains copy of + dequeued value. The assignment operator for type \ref value_type is invoked. + If queue is empty, the function returns \p false, \p dest is unchanged. 
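+
+            A short illustrative sketch (it assumes \p q is a BasketQueue< cds::gc::HP, int > and the GC is initialized):
+            \code
+            int v;
+            while ( q.dequeue( v )) {
+                // process v; the loop drains the queue
+            }
+            \endcode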
+ */ + bool dequeue( value_type& dest ) + { + typedef cds::details::trivial_assign functor; + return dequeue( dest, functor() ); + } + + /// Synonym for \ref enqueue function + bool push( const value_type& val ) + { + return enqueue( val ); + } + + /// Synonym for template version of \ref enqueue function + template + bool push( const Type& data, Func f ) + { + return enqueue( data, f ); + } + + /// Synonym for \ref dequeue function + bool pop( value_type& dest ) + { + return dequeue( dest ); + } + + /// Synonym for template version of \ref dequeue function + template + bool pop( Type& dest, Func f ) + { + return dequeue( dest, f ); + } + + /// Checks if the queue is empty + /** + Note that this function is not \p const. + The function is based on \ref dequeue algorithm. + */ + bool empty() + { + return base_class::empty(); + } + + /// Clear the queue + /** + The function repeatedly calls \ref dequeue until it returns NULL. + */ + void clear() + { + base_class::clear(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_BASKET_QUEUE_H diff --git a/cds/container/cuckoo_base.h b/cds/container/cuckoo_base.h new file mode 100644 index 00000000..8c2f90b1 --- /dev/null +++ b/cds/container/cuckoo_base.h @@ -0,0 +1,211 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_CUCKOO_BASE_H +#define __CDS_CONTAINER_CUCKOO_BASE_H + +#include + +namespace cds { namespace container { + + /// CuckooSet and CuckooMap related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace cuckoo { + +#ifdef CDS_DOXYGEN_INVOKED + /// Lock striping concurrent access policy. This is typedef for intrusive::cuckoo::striping template + class striping + {}; +#else + using intrusive::cuckoo::striping; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + /// Refinable concurrent access policy. This is typedef for intrusive::cuckoo::refinable template + class refinable + {}; +#else + using intrusive::cuckoo::refinable; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + /// Striping internal statistics. This is typedef for intrusive::cuckoo::striping_stat + class striping_stat + {}; +#else + using intrusive::cuckoo::striping_stat; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + /// Empty striping internal statistics. This is typedef for intrusive::cuckoo::empty_striping_stat + class empty_striping_stat + {}; +#else + using intrusive::cuckoo::empty_striping_stat; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + /// Refinable internal statistics. This is typedef for intrusive::cuckoo::refinable_stat + class refinable_stat + {}; +#else + using intrusive::cuckoo::refinable_stat; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + /// Empty refinable internal statistics. This is typedef for intrusive::cuckoo::empty_refinable_stat + class empty_refinable_stat + {}; +#else + using intrusive::cuckoo::empty_refinable_stat; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + /// Cuckoo statistics. This is typedef for intrusive::cuckoo::stat + class stat + {}; +#else + using intrusive::cuckoo::stat; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + /// Cuckoo empty statistics.This is typedef for intrusive::cuckoo::empty_stat + class empty_stat + {}; +#else + using intrusive::cuckoo::empty_stat; +#endif + + /// Option specifying whether to store hash values in the node + /** + This option reserves additional space in the hook to store the hash value of the object once it's introduced in the container. + When this option is used, the unordered container will store the calculated hash value in the hook and rehashing operations won't need + to recalculate the hash of the value. 
This option will improve the performance of unordered containers + when rehashing is frequent or hashing the value is a slow operation + + The \p Enable template parameter toggles the feature: + - the value \p true enables storing the hash values + - the value \p false disables storing the hash values + */ + template + struct store_hash + { + //@cond + template + struct pack: public Base { + static bool const store_hash = Enable; + }; + //@endcond + }; + +#ifdef CDS_DOXYGEN_INVOKED + /// Probe set type option + /** + The option specifies probe set type for the CuckooSet and CuckooMap. + Available \p Type: + - \p cuckoo::list - the probe-set is a single-linked list. + - \p cuckoo::vector - the probe-set is a vector + with constant-size \p Capacity where \p Capacity is an unsigned int constant. + */ + template + struct probeset_type + {}; +#else + using intrusive::cuckoo::probeset_type; +#endif + + using intrusive::cuckoo::list; + using intrusive::cuckoo::vector; + + /// Type traits for CuckooSet and CuckooMap classes + struct type_traits + { + /// Hash functors tuple + /** + This is mandatory type and has no predefined one. + + At least, two hash functors should be provided. All hash functor + should be orthogonal (different): for each i,j: i != j => h[i](x) != h[j](x) . + The hash functors are defined as std::tuple< H1, H2, ... Hn > : + \@code cds::opt::hash< std::tuple< h1, h2 > > \@endcode + The number of hash functors specifies the number \p k - the count of hash tables in cuckoo hashing. + Up to 10 different hash functors are supported. + */ + typedef cds::opt::none hash; + + /// Concurrent access policy + /** + Available opt::mutex_policy types: + - cuckoo::striping - simple, but the lock array is not resizable + - cuckoo::refinable - resizable lock array, but more complex access to set data. + + Default is cuckoo::striping. + */ + typedef cuckoo::striping<> mutex_policy; + + /// Key equality functor + /** + Default is std::equal_to + */ + typedef opt::none equal_to; + + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// specifies binary predicate used for key comparison. + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// Item counter + /** + The type for item counting feature. + Default is cds::atomicity::item_counter + + Only atomic item counter type is allowed. + */ + typedef cds::intrusive::cuckoo::type_traits::item_counter item_counter; + + /// Allocator type + /** + The allocator type for allocating bucket tables. + Default is \p CDS_DEFAULT_ALLOCATOR that is \p std::allocator + */ + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Node allocator type + /** + If this type is not set explicitly, the \ref allocator type is used. + */ + typedef opt::none node_allocator; + + /// Store hash value into items. See cuckoo::store_hash for explanation + static bool const store_hash = false; + + /// Probe-set type. 
See \ref probeset_type option for explanation + typedef cuckoo::list probeset_type; + + /// Internal statistics + typedef empty_stat stat; + }; + + /// Metafunction converting option list to CuckooSet/CuckooMap traits + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + \p Options list see CuckooSet and CuckooMap + */ + template + struct make_traits { + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< cuckoo::type_traits, CDS_OPTIONS12 >::type + ,CDS_OPTIONS12 + >::type type ; ///< Result of metafunction + }; + } // namespace cuckoo +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_CUCKOO_BASE_H diff --git a/cds/container/cuckoo_map.h b/cds/container/cuckoo_map.h new file mode 100644 index 00000000..d9282266 --- /dev/null +++ b/cds/container/cuckoo_map.h @@ -0,0 +1,906 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_CUCKOO_MAP_H +#define __CDS_CONTAINER_CUCKOO_MAP_H + +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + template + struct make_cuckoo_map + { + typedef Key key_type ; ///< key type + typedef T mapped_type ; ///< type of value stored in the map + typedef std::pair value_type ; ///< Pair type + + typedef Traits original_type_traits; + typedef typename original_type_traits::probeset_type probeset_type; + static bool const store_hash = original_type_traits::store_hash; + static unsigned int const store_hash_count = store_hash ? ((unsigned int) std::tuple_size< typename original_type_traits::hash::hash_tuple_type >::value) : 0; + + struct node_type: public intrusive::cuckoo::node + { + value_type m_val; + + template + node_type( K const& key ) + : m_val( std::make_pair( key_type(key), mapped_type() )) + {} + + template + node_type( K const& key, Q const& v ) + : m_val( std::make_pair( key_type(key), mapped_type(v) )) + {} + +# ifdef CDS_EMPLACE_SUPPORT + template + node_type( K&& key, Args&&... 
args ) + : m_val( std::forward(key), std::move( mapped_type(std::forward(args)...)) ) + {} +# else + node_type() + {} +# endif + }; + + /* + template + struct predicate_wrapper { + typedef Pred native_predicate; + + ReturnValue operator()( node_type const& n1, node_type const& n2) const + { + return native_predicate()(n1.m_val.first, n2.m_val.first ); + } + template + ReturnValue operator()( node_type const& n, Q const& v) const + { + return native_predicate()(n.m_val.first, v); + } + template + ReturnValue operator()( Q const& v, node_type const& n) const + { + return native_predicate()(v, n.m_val.first); + } + + template + ReturnValue operator()( Q1 const& v1, Q2 const& v2) const + { + return native_predicate()(v1, v2); + } + }; + */ + + struct key_accessor { + key_type const& operator()( node_type const& node ) const + { + return node.m_val.first; + } + }; + + struct intrusive_traits: public original_type_traits + { + typedef intrusive::cuckoo::base_hook< + cds::intrusive::cuckoo::probeset_type< probeset_type > + ,cds::intrusive::cuckoo::store_hash< store_hash_count > + > hook; + + typedef cds::intrusive::cuckoo::type_traits::disposer disposer; + + typedef typename std::conditional< + std::is_same< typename original_type_traits::equal_to, opt::none >::value + , opt::none + , cds::details::predicate_wrapper< node_type, typename original_type_traits::equal_to, key_accessor > + >::type equal_to; + + typedef typename std::conditional< + std::is_same< typename original_type_traits::compare, opt::none >::value + , opt::none + , cds::details::compare_wrapper< node_type, typename original_type_traits::compare, key_accessor > + >::type compare; + + typedef typename std::conditional< + std::is_same< typename original_type_traits::less, opt::none >::value + ,opt::none + ,cds::details::predicate_wrapper< node_type, typename original_type_traits::less, key_accessor > + >::type less; + + typedef opt::details::hash_list_wrapper< typename original_type_traits::hash, node_type, key_accessor > hash; + }; + + typedef intrusive::CuckooSet< node_type, intrusive_traits > type; + }; + } // namespace details + //@endcond + + /// Cuckoo hash map + /** @ingroup cds_nonintrusive_map + + Source + - [2007] M.Herlihy, N.Shavit, M.Tzafrir "Concurrent Cuckoo Hashing. Technical report" + - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming" + + About Cuckoo hashing + + [From "The Art of Multiprocessor Programming"] + Cuckoo hashing is a hashing algorithm in which a newly added item displaces any earlier item + occupying the same slot. For brevity, a table is a k-entry array of items. For a hash set f size + N = 2k we use a two-entry array of tables, and two independent hash functions, + h0, h1: KeyRange -> 0,...,k-1 + mapping the set of possible keys to entries in he array. To test whether a value \p x is in the set, + find(x) tests whether either table[0][h0(x)] or table[1][h1(x)] is + equal to \p x. Similarly, erase(x)checks whether \p x is in either table[0][h0(x)] + or table[1][h1(x)], ad removes it if found. + + The insert(x) successively "kicks out" conflicting items until every key has a slot. + To add \p x, the method swaps \p x with \p y, the current occupant of table[0][h0(x)]. + If the prior value was \p NULL, it is done. Otherwise, it swaps the newly nest-less value \p y + for the current occupant of table[1][h1(y)] in the same way. As before, if the prior value + was \p NULL, it is done. 
Otherwise, the method continues swapping entries (alternating tables) + until it finds an empty slot. We might not find an empty slot, either because the table is full, + or because the sequence of displacement forms a cycle. We therefore need an upper limit on the + number of successive displacements we are willing to undertake. When this limit is exceeded, + we resize the hash table, choose new hash functions and start over. + + For concurrent cuckoo hashing, rather than organizing the set as a two-dimensional table of + items, we use two-dimensional table of probe sets, where a probe set is a constant-sized set + of items with the same hash code. Each probe set holds at most \p PROBE_SIZE items, but the algorithm + tries to ensure that when the set is quiescent (i.e no method call in progress) each probe set + holds no more than THRESHOLD < PROBE_SET items. While method calls are in-flight, a probe + set may temporarily hold more than \p THRESHOLD but never more than \p PROBE_SET items. + + In current implementation, a probe set can be defined either as a (single-linked) list + or as a fixed-sized vector, optionally ordered. + + In description above two-table cuckoo hashing (k = 2) has been considered. + We can generalize this approach for k >= 2 when we have \p k hash functions + h[0], ... h[k-1] and \p k tables table[0], ... table[k-1]. + + The search in probe set is linear, the complexity is O(PROBE_SET) . + The probe set may be ordered or not. Ordered probe set can be a little better since + the average search complexity is O(PROBE_SET/2). + However, the overhead of sorting can eliminate a gain of ordered search. + + The probe set is ordered if opt::compare or opt::less is specified in \p %CuckooSet + declaration. Otherwise, the probe set is unordered and \p %CuckooSet must contain + opt::equal_to option. + + Template arguments: + - \p Key - key type + - \p T - the type stored in the map. + - \p Traits - type traits. See cuckoo::type_traits for explanation. + It is possible to declare option-based set with cuckoo::make_traits metafunction result as \p Traits template argument. + + Template argument list \p Options... of cuckoo::make_traits metafunction are: + - opt::hash - hash functor tuple, mandatory option. At least, two hash functors should be provided. All hash functor + should be orthogonal (different): for each i,j: i != j => h[i](x) != h[j](x) . + The hash functors are passed as std::tuple< H1, H2, ... Hn > . The number of hash functors specifies + the number \p k - the count of hash tables in cuckoo hashing. If the compiler supports variadic templates + then k is unlimited, otherwise up to 10 different hash functors are supported. + - opt::mutex_policy - concurrent access policy. + Available policies: cuckoo::striping, cuckoo::refinable. + Default is cuckoo::striping. + - opt::equal_to - key equality functor like \p std::equal_to. + If this functor is defined then the probe-set will be unordered. + If opt::compare or opt::less option is specified too, then the probe-set will be ordered + and opt::equal_to will be ignored. + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + If opt::compare or opt::less option is specified, then the probe-set will be ordered. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + If opt::compare or opt::less option is specified, then the probe-set will be ordered. + - opt::item_counter - the type of item counting feature. 
Default is \ref opt::v::sequential_item_counter. + - opt::allocator - the allocator type using for allocating bucket tables. + Default is \p CDS_DEFAULT_ALLOCATOR + - opt::node_allocator - the allocator type using for allocating map's items. If this option + is not specified then the type defined in opt::allocator option is used. + - cuckoo::store_hash - this option reserves additional space in the node to store the hash value + of the object once it's introduced in the container. When this option is used, + the map will store the calculated hash value in the node and rehashing operations won't need + to recalculate the hash of the value. This option will improve the performance of maps + when rehashing is frequent or hashing the value is a slow operation. Default value is \p false. + - \ref intrusive::cuckoo::probeset_type "cuckoo::probeset_type" - type of probe set, may be \p cuckoo::list or cuckoo::vector, + Default is \p cuckoo::list. + - opt::stat - internal statistics. Possibly types: cuckoo::stat, cuckoo::empty_stat. + Default is cuckoo::empty_stat + + Examples + + Declares cuckoo mapping from \p std::string to struct \p foo. + For cuckoo hashing we should provide at least two hash functions: + \code + struct hash1 { + size_t operator()(std::string const& s) const + { + return cds::opt::v::hash( s ); + } + }; + + struct hash2: private hash1 { + size_t operator()(std::string const& s) const + { + size_t h = ~( hash1::operator()(s)); + return ~h + 0x9e3779b9 + (h << 6) + (h >> 2); + } + }; + \endcode + + Cuckoo-map with list-based unordered probe set and storing hash values + \code + #include + + // Declare type traits + struct my_traits: public cds::container::cuckoo::type_traits + { + typedef std::equal_to< std::string > equal_to; + typedef std::tuple< hash1, hash2 > hash; + + static bool const store_hash = true; + }; + + // Declare CuckooMap type + typedef cds::container::CuckooMap< std::string, foo, my_traits > my_cuckoo_map; + + // Equal option-based declaration + typedef cds::container::CuckooMap< std::string, foo, + cds::container::cuckoo::make_traits< + cds::opt::hash< std::tuple< hash1, hash2 > > + ,cds::opt::equal_to< std::equal_to< std::string > > + ,cds::container::cuckoo::store_hash< true > + >::type + > opt_cuckoo_map; + \endcode + + If we provide \p less functor instead of \p equal_to + we get as a result a cuckoo map with ordered probe set that may improve + performance. 
+ Example for ordered vector-based probe-set: + + \code + #include + + // Declare type traits + // We use a vector of capacity 4 as probe-set container and store hash values in the node + struct my_traits: public cds::container::cuckoo::type_traits + { + typedef std::less< std::string > less; + typedef std::tuple< hash1, hash2 > hash; + typedef cds::container::cuckoo::vector<4> probeset_type; + + static bool const store_hash = true; + }; + + // Declare CuckooMap type + typedef cds::container::CuckooMap< std::string, foo, my_traits > my_cuckoo_map; + + // Equal option-based declaration + typedef cds::container::CuckooMap< std::string, foo, + cds::container::cuckoo::make_traits< + cds::opt::hash< std::tuple< hash1, hash2 > > + ,cds::opt::less< std::less< std::string > > + ,cds::container::cuckoo::probeset_type< cds::container::cuckoo::vector<4> > + ,cds::container::cuckoo::store_hash< true > + >::type + > opt_cuckoo_map; + \endcode + + */ + template + class CuckooMap: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::CuckooSet< std::pair< Key const, T>, Traits> +#else + protected details::make_cuckoo_map::type +#endif + { + //@cond + typedef details::make_cuckoo_map maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef Key key_type ; ///< key type + typedef T mapped_type ; ///< value type stored in the container + typedef std::pair value_type ; ///< Key-value pair type stored in the map + + typedef Traits options ; ///< traits + + typedef typename options::hash hash ; ///< hash functor tuple wrapped for internal use + typedef typename base_class::hash_tuple_type hash_tuple_type ; ///< hash tuple type + + typedef typename base_class::mutex_policy mutex_policy ; ///< Concurrent access policy, see cuckoo::type_traits::mutex_policy + typedef typename base_class::stat stat ; ///< internal statistics type + + static bool const c_isSorted = base_class::c_isSorted ; ///< whether the probe set should be ordered + static size_t const c_nArity = base_class::c_nArity ; ///< the arity of cuckoo hashing: the number of hash functors provided; minimum 2. + + typedef typename base_class::key_equal_to key_equal_to ; ///< Key equality functor; used only for unordered probe-set + + typedef typename base_class::key_comparator key_comparator ; ///< key comparing functor based on opt::compare and opt::less option setter. 
Used only for ordered probe set + + typedef typename base_class::allocator allocator ; ///< allocator type used for internal bucket table allocations + + /// Node allocator type + typedef typename std::conditional< + std::is_same< typename options::node_allocator, opt::none >::value, + allocator, + typename options::node_allocator + >::type node_allocator; + + /// item counter type + typedef typename options::item_counter item_counter; + + protected: + //@cond + typedef typename base_class::scoped_cell_lock scoped_cell_lock; + typedef typename base_class::scoped_full_lock scoped_full_lock; + typedef typename base_class::scoped_resize_lock scoped_resize_lock; + typedef typename maker::key_accessor key_accessor; + + typedef typename base_class::value_type node_type; + typedef cds::details::Allocator< node_type, node_allocator > cxx_node_allocator; + //@endcond + + public: + static unsigned int const c_nDefaultProbesetSize = base_class::c_nDefaultProbesetSize ; ///< default probeset size + static size_t const c_nDefaultInitialSize = base_class::c_nDefaultInitialSize ; ///< default initial size + static unsigned int const c_nRelocateLimit = base_class::c_nRelocateLimit ; ///< Count of attempts to relocate before giving up + + protected: + //@cond + template + static node_type * alloc_node( K const& key ) + { + return cxx_node_allocator().New( key ); + } +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node( K&& key, Args&&... args ) + { + return cxx_node_allocator().MoveNew( std::forward( key ), std::forward(args)... ); + } +# endif + + static void free_node( node_type * pNode ) + { + cxx_node_allocator().Delete( pNode ); + } + //@endcond + + protected: + //@cond + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + +#ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_insert_functor + { + void operator()( value_type& ) const + {} + }; + + template + class insert_value_functor + { + Q const& m_val; + public: + insert_value_functor( Q const & v) + : m_val(v) + {} + + void operator()( value_type& item ) + { + item.second = m_val; + } + }; + + template + class insert_key_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + insert_key_wrapper( Func f ): base_class(f) {} + + void operator()( node_type& item ) + { + base_class::get()( item.m_val ); + } + }; + + template + class ensure_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + ensure_wrapper( Func f) : base_class(f) {} + + void operator()( bool bNew, node_type& item, node_type const& ) + { + base_class::get()( bNew, item.m_val ); + } + }; + + template + class find_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + find_wrapper( Func f ) + : base_class(f) + {} + + template + void operator()( node_type& item, Q& val ) + { + base_class::get()( item.m_val, val ); + } + }; +#endif // #ifndef CDS_CXX11_LAMBDA_SUPPORT + + //@endcond + + public: + /// Default constructor + /** + Initial size = \ref c_nDefaultInitialSize + + Probe set size: + - \ref c_nDefaultProbesetSize if \p probeset_type is \p cuckoo::list + - \p Capacity if \p probeset_type is cuckoo::vector + + Probe set threshold = probe set size - 1 + */ + CuckooMap() + {} + + /// Constructs an object with given probe set size and threshold + /** + If probe set type is 
cuckoo::vector vector + then \p nProbesetSize should be equal to vector's \p Capacity. + */ + CuckooMap( + size_t nInitialSize ///< Initial map size; if 0 - use default initial size \ref c_nDefaultInitialSize + , unsigned int nProbesetSize ///< probe set size + , unsigned int nProbesetThreshold = 0 ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 + ) + : base_class( nInitialSize, nProbesetSize, nProbesetThreshold ) + {} + + /// Constructs an object with given hash functor tuple + /** + The probe set size and threshold are set as default, see CuckooSet() + */ + CuckooMap( + hash_tuple_type const& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : base_class( h ) + {} + + /// Constructs a map with given probe set properties and hash functor tuple + /** + If probe set type is cuckoo::vector vector + then \p nProbesetSize should be equal to vector's \p Capacity. + */ + CuckooMap( + size_t nInitialSize ///< Initial map size; if 0 - use default initial size \ref c_nDefaultInitialSize + , unsigned int nProbesetSize ///< probe set size + , unsigned int nProbesetThreshold ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 + , hash_tuple_type const& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : base_class( nInitialSize, nProbesetSize, nProbesetThreshold, h ) + {} + +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + /// Constructs a map with given hash functor tuple (move semantics) + /** + The probe set size and threshold are set as default, see CuckooSet() + */ + CuckooMap( + hash_tuple_type&& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : base_class( std::forward(h) ) + {} + + /// Constructs a map with given probe set properties and hash functor tuple (move semantics) + /** + If probe set type is cuckoo::vector vector + then \p nProbesetSize should be equal to vector's \p Capacity. + */ + CuckooMap( + size_t nInitialSize ///< Initial map size; if 0 - use default initial size \ref c_nDefaultInitialSize + , unsigned int nProbesetSize ///< probe set size + , unsigned int nProbesetThreshold ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 + , hash_tuple_type&& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : base_class( nInitialSize, nProbesetSize, nProbesetThreshold, std::forward(h) ) + {} +# endif // ifdef CDS_MOVE_SEMANTICS_SUPPORT + + /// Destructor clears the map + ~CuckooMap() + { + clear(); + } + + public: + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from a value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( K const& key ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return insert_key( key, [](value_type&){} ); +# else + return insert_key( key, empty_insert_functor() ); +# endif + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. 
+ - The \ref value_type should be constructible from \p val of type \p V. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( K const& key, V const& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return insert_key( key, [&val](value_type& item) { item.second = val ; } ); +# else + insert_value_functor f(val); + return insert_key( key, cds::ref(f) ); +# endif + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + The user-defined functor can be passed by reference using boost::ref + and it is called only if inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + */ + template + bool insert_key( const K& key, Func func ) + { + scoped_node_ptr pNode( alloc_node( key )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( base_class::insert( *pNode, [&func]( node_type& item ) { cds::unref(func)( item.m_val ); } )) +# else + insert_key_wrapper wrapper(func); + if ( base_class::insert( *pNode, cds::ref(wrapper) )) +#endif + { + pNode.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// For key \p key inserts data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( K&& key, Args&&... args ) + { + scoped_node_ptr pNode( alloc_node( std::forward(key), std::forward(args)... )); + if ( base_class::insert( *pNode )) { + pNode.release(); + return true; + } + return false; + } +# endif + + + /// Ensures that the \p key exists in the map + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p key not found in the map, then the new item created from \p key + is inserted into the map (note that in this case the \ref key_type should be + constructible from type \p K). + Otherwise, the functor \p func is called with item found. + The functor \p Func may be a function with signature: + \code + void func( bool bNew, value_type& item ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + + The functor may change any fields of the \p item.second that is \ref value_type. + + You may pass \p func argument by reference using boost::ref. 
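+
+            For example (an illustrative sketch; it assumes the \p my_traits type traits from the class-level example
+            and a compiler with lambda support):
+            \code
+            typedef cds::container::CuckooMap< std::string, int, my_traits > map_type;
+            map_type m;
+
+            // Inserts the key "answer" with mapped value 42 if the key is absent,
+            // otherwise updates the mapped value of the existing item
+            m.ensure( std::string("answer"),
+                []( bool /*bNew*/, map_type::value_type& item ) {
+                    item.second = 42;
+                });
+            \endcode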
+ + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair ensure( K const& key, Func func ) + { + scoped_node_ptr pNode( alloc_node( key )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair res = base_class::ensure( *pNode, + [&func](bool bNew, node_type& item, node_type const& ){ cds::unref(func)( bNew, item.m_val ); } + ); +# else + ensure_wrapper wrapper( func ); + std::pair res = base_class::ensure( *pNode, cds::ref(wrapper) ); +# endif + if ( res.first && res.second ) + pNode.release(); + return res; + } + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_CuckooMap_erase_val + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + node_type * pNode = base_class::erase(key); + if ( pNode ) { + free_node( pNode ); + return true; + } + return false; + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_CuckooMap_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + If cuckoo map is ordered, then \p Predicate should have the interface and semantics like \p std::less. + If cuckoo map is unordered, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p Predicate must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Predicate pred ) + { + node_type * pNode = base_class::erase_with(key, cds::details::predicate_wrapper()); + if ( pNode ) { + free_node( pNode ); + return true; + } + return false; + } + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_CuckooMap_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& item) { ... } + }; + \endcode + The functor may be passed by reference using boost:ref + + Return \p true if key is found and deleted, \p false otherwise + + See also: \ref erase + */ + template + bool erase( K const& key, Func f ) + { + node_type * pNode = base_class::erase( key ); + if ( pNode ) { + cds::unref(f)( pNode->m_val ); + free_node( pNode ); + return true; + } + return false; + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_CuckooMap_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + If cuckoo map is ordered, then \p Predicate should have the interface and semantics like \p std::less. + If cuckoo map is unordered, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p Predicate must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Predicate pred, Func f ) + { + node_type * pNode = base_class::erase_with( key, cds::details::predicate_wrapper() ); + if ( pNode ) { + cds::unref(f)( pNode->m_val ); + free_node( pNode ); + return true; + } + return false; + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_CuckooMap_find_func + + The function searches the item with key equal to \p key and calls the functor \p f for item found. 
+ The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + You can pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change \p item.second. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( key, [&f](node_type& item, K const& ) { cds::unref(f)( item.m_val );}); +# else + find_wrapper wrapper(f); + return base_class::find( key, cds::ref(wrapper) ); +# endif + } + + /// Find the key \p val using \p pred predicate for comparing + /** + The function is an analog of \ref cds_nonintrusive_CuckooMap_find_func "find(K const&, Func)" + but \p pred is used for key comparison. + If you use ordered cuckoo map, then \p Predicate should have the interface and semantics like \p std::less. + If you use unordered cuckoo map, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Predicate pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( key, cds::details::predicate_wrapper(), + [&f](node_type& item, K const& ) { cds::unref(f)( item.m_val );}); +# else + find_wrapper wrapper(f); + return base_class::find_with( key, cds::details::predicate_wrapper(), cds::ref(wrapper) ); +# endif + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_CuckooMap_find_val + + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool find( K const& key ) + { + return base_class::find( key ); + } + + /// Find the key \p val using \p pred predicate for comparing + /** + The function is an analog of \ref cds_nonintrusive_CuckooMap_find_val "find(K const&)" + but \p pred is used for key comparison. + If you use ordered cuckoo map, then \p Predicate should have the interface and semantics like \p std::less. + If you use unordered cuckoo map, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Predicate pred ) + { + return base_class::find_with( key, cds::details::predicate_wrapper() ); + } + + /// Clears the map + void clear() + { + base_class::clear_and_dispose( node_disposer() ); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + + /// Returns the size of hash table + /** + The hash table size is non-constant and can be increased via resizing. + */ + size_t bucket_count() const + { + return base_class::bucket_count(); + } + + /// Returns lock array size + /** + The lock array size is constant. 
+ */ + size_t lock_count() const + { + return base_class::lock_count(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Returns const reference to mutex policy internal statistics + typename mutex_policy::statistics_type const& mutex_policy_statistics() const + { + return base_class::mutex_policy_statistics(); + } + + }; +}} // namespace cds::container + +#endif //#ifndef __CDS_CONTAINER_CUCKOO_MAP_H diff --git a/cds/container/cuckoo_set.h b/cds/container/cuckoo_set.h new file mode 100644 index 00000000..05606af2 --- /dev/null +++ b/cds/container/cuckoo_set.h @@ -0,0 +1,987 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_CUCKOO_SET_H +#define __CDS_CONTAINER_CUCKOO_SET_H + +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + template + struct make_cuckoo_set + { + typedef T value_type; + typedef Traits original_type_traits; + typedef typename original_type_traits::probeset_type probeset_type; + static bool const store_hash = original_type_traits::store_hash; + static unsigned int const store_hash_count = store_hash ? ((unsigned int) std::tuple_size< typename original_type_traits::hash::hash_tuple_type >::value) : 0; + + struct node_type: public intrusive::cuckoo::node + { + value_type m_val; + + template + node_type( Q const& v ) + : m_val(v) + {} + +# ifdef CDS_EMPLACE_SUPPORT + template + node_type( Args&&... args ) + : m_val( std::forward(args)...) + {} +# else + node_type() + {} +# endif + }; + + struct value_accessor { + value_type const& operator()( node_type const& node ) const + { + return node.m_val; + } + }; + +#ifdef CDS_CXX11_TEMPLATE_ALIAS_SUPPORT + template + using predicate_wrapper = cds::details::binary_functor_wrapper< ReturnValue, Pred, node_type, value_accessor >; +#else + template + struct predicate_wrapper: public cds::details::binary_functor_wrapper< ReturnValue, Pred, node_type, value_accessor > + {}; +#endif + + struct intrusive_traits: public original_type_traits + { + typedef intrusive::cuckoo::base_hook< + cds::intrusive::cuckoo::probeset_type< probeset_type > + ,cds::intrusive::cuckoo::store_hash< store_hash_count > + > hook; + + typedef cds::intrusive::cuckoo::type_traits::disposer disposer; + + typedef typename std::conditional< + std::is_same< typename original_type_traits::equal_to, opt::none >::value + , opt::none + , predicate_wrapper< typename original_type_traits::equal_to, bool > + >::type equal_to; + + typedef typename std::conditional< + std::is_same< typename original_type_traits::compare, opt::none >::value + , opt::none + , predicate_wrapper< typename original_type_traits::compare, int > + >::type compare; + + typedef typename std::conditional< + std::is_same< typename original_type_traits::less, opt::none >::value + ,opt::none + ,predicate_wrapper< typename original_type_traits::less, bool > + >::type less; + + typedef opt::details::hash_list_wrapper< typename original_type_traits::hash, node_type, value_accessor > hash; + }; + + typedef intrusive::CuckooSet< node_type, intrusive_traits > type; + }; + } // namespace details + //@endcond + + /// Cuckoo hash set + /** @ingroup cds_nonintrusive_set + + Source + - [2007] M.Herlihy, N.Shavit, M.Tzafrir "Concurrent Cuckoo Hashing. 
Technical report" + - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming" + + About Cuckoo hashing + + [From "The Art of Multiprocessor Programming"] + Cuckoo hashing is a hashing algorithm in which a newly added item displaces any earlier item + occupying the same slot. For brevity, a table is a k-entry array of items. For a hash set f size + N = 2k we use a two-entry array of tables, and two independent hash functions, + h0, h1: KeyRange -> 0,...,k-1 + mapping the set of possible keys to entries in he array. To test whether a value \p x is in the set, + find(x) tests whether either table[0][h0(x)] or table[1][h1(x)] is + equal to \p x. Similarly, erase(x)checks whether \p x is in either table[0][h0(x)] + or table[1][h1(x)], ad removes it if found. + + The insert(x) successively "kicks out" conflicting items until every key has a slot. + To add \p x, the method swaps \p x with \p y, the current occupant of table[0][h0(x)]. + If the prior value was \p NULL, it is done. Otherwise, it swaps the newly nest-less value \p y + for the current occupant of table[1][h1(y)] in the same way. As before, if the prior value + was \p NULL, it is done. Otherwise, the method continues swapping entries (alternating tables) + until it finds an empty slot. We might not find an empty slot, either because the table is full, + or because the sequence of displacement forms a cycle. We therefore need an upper limit on the + number of successive displacements we are willing to undertake. When this limit is exceeded, + we resize the hash table, choose new hash functions and start over. + + For concurrent cuckoo hashing, rather than organizing the set as a two-dimensional table of + items, we use two-dimensional table of probe sets, where a probe set is a constant-sized set + of items with the same hash code. Each probe set holds at most \p PROBE_SIZE items, but the algorithm + tries to ensure that when the set is quiescent (i.e no method call in progress) each probe set + holds no more than THRESHOLD < PROBE_SET items. While method calls are in-flight, a probe + set may temporarily hold more than \p THRESHOLD but never more than \p PROBE_SET items. + + In current implementation, a probe set can be defined either as a (single-linked) list + or as a fixed-sized vector, optionally ordered. + + In description above two-table cuckoo hashing (k = 2) has been considered. + We can generalize this approach for k >= 2 when we have \p k hash functions + h[0], ... h[k-1] and \p k tables table[0], ... table[k-1]. + + The search in probe set is linear, the complexity is O(PROBE_SET) . + The probe set may be ordered or not. Ordered probe set can be a little better since + the average search complexity is O(PROBE_SET/2). + However, the overhead of sorting can eliminate a gain of ordered search. + + The probe set is ordered if opt::compare or opt::less is specified in \p %CuckooSet + declaration. Otherwise, the probe set is unordered and \p %CuckooSet must contain + opt::equal_to option. + + Template arguments: + - \p T - the type stored in the set. + - \p Traits - type traits. See cuckoo::type_traits for explanation. + It is possible to declare option-based set with cuckoo::make_traits metafunction result as \p Traits template argument. + + Template argument list \p Options... of cuckoo::make_traits metafunction are: + - opt::hash - hash functor tuple, mandatory option. At least, two hash functors should be provided. 
+ Template arguments:
+ - \p T - the type stored in the set.
+ - \p Traits - type traits. See cuckoo::type_traits for explanation.
+     It is possible to declare an option-based set with the cuckoo::make_traits metafunction result as the \p Traits template argument.
+
+ The template argument list \p Options... of the cuckoo::make_traits metafunction is:
+ - opt::hash - hash functor tuple, mandatory option. At least two hash functors should be provided. All hash functors
+     should be orthogonal (different): for each i,j: i != j => h[i](x) != h[j](x).
+     The hash functors are passed as std::tuple< H1, H2, ... Hn >. The number of hash functors specifies
+     the number \p k - the count of hash tables in cuckoo hashing. If the compiler supports variadic templates
+     then k is unlimited, otherwise up to 10 different hash functors are supported.
+ - opt::mutex_policy - concurrent access policy.
+     Available policies: cuckoo::striping, cuckoo::refinable.
+     Default is cuckoo::striping.
+ - opt::equal_to - key equality functor like \p std::equal_to.
+     If this functor is defined then the probe-set will be unordered.
+     If the opt::compare or opt::less option is specified too, then the probe-set will be ordered
+     and opt::equal_to will be ignored.
+ - opt::compare - key comparison functor. No default functor is provided.
+     If the option is not specified, opt::less is used.
+     If the opt::compare or opt::less option is specified, then the probe-set will be ordered.
+ - opt::less - specifies the binary predicate used for key comparison. Default is \p std::less.
+     If the opt::compare or opt::less option is specified, then the probe-set will be ordered.
+ - opt::item_counter - the type of item counting feature. Default is \ref opt::v::sequential_item_counter.
+ - opt::allocator - the allocator type used for allocating bucket tables.
+     Default is \p CDS_DEFAULT_ALLOCATOR.
+ - opt::node_allocator - the allocator type used for allocating the set's items. If this option
+     is not specified then the type defined in the opt::allocator option is used.
+ - cuckoo::store_hash - this option reserves additional space in the node to store the hash value
+     of the object once it is introduced into the container. When this option is used,
+     the unordered container will store the calculated hash value in the node and rehashing operations won't need
+     to recalculate the hash of the value. This option will improve the performance of unordered containers
+     when rehashing is frequent or hashing the value is a slow operation. Default value is \p false.
+ - \ref intrusive::cuckoo::probeset_type "cuckoo::probeset_type" - type of probe set, may be \p cuckoo::list or cuckoo::vector.
+     Default is \p cuckoo::list.
+ - opt::stat - internal statistics. Possible types: cuckoo::stat, cuckoo::empty_stat.
+     Default is cuckoo::empty_stat.
+
+ Examples
+
+ Cuckoo-set with list-based unordered probe set and storing hash values
+ \code
+ #include <cds/container/cuckoo_set.h>
+
+ // Data stored in cuckoo set
+ struct my_data
+ {
+     // key field
+     std::string strKey;
+
+     // other data
+     // ...
+ };
+
+ // Provide an equal_to functor for my_data since we will use an unordered probe-set
+ struct my_data_equal_to {
+     bool operator()( const my_data& d1, const my_data& d2 ) const
+     {
+         return d1.strKey.compare( d2.strKey ) == 0;
+     }
+
+     bool operator()( const my_data& d, const std::string& s ) const
+     {
+         return d.strKey.compare(s) == 0;
+     }
+
+     bool operator()( const std::string& s, const my_data& d ) const
+     {
+         return s.compare( d.strKey ) == 0;
+     }
+ };
+
+ // Provide two hash functors for my_data
+ struct hash1 {
+     size_t operator()(std::string const& s) const
+     {
+         return cds::opt::v::hash( s );
+     }
+     size_t operator()( my_data const& d ) const
+     {
+         return (*this)( d.strKey );
+     }
+ };
+
+ struct hash2: private hash1 {
+     size_t operator()(std::string const& s) const
+     {
+         size_t h = ~( hash1::operator()(s));
+         return ~h + 0x9e3779b9 + (h << 6) + (h >> 2);
+     }
+     size_t operator()( my_data const& d ) const
+     {
+         return (*this)( d.strKey );
+     }
+ };
+
+ // Declare type traits
+ struct my_traits: public cds::container::cuckoo::type_traits
+ {
+     typedef my_data_equal_to              equal_to;
+     typedef std::tuple< hash1, hash2 >    hash;
+
+     static bool const store_hash = true;
+ };
+
+ // Declare CuckooSet type
+ typedef cds::container::CuckooSet< my_data, my_traits > my_cuckoo_set;
+
+ // Equivalent option-based declaration
+ typedef cds::container::CuckooSet< my_data,
+     cds::container::cuckoo::make_traits<
+         cds::opt::hash< std::tuple< hash1, hash2 > >
+         ,cds::opt::equal_to< my_data_equal_to >
+         ,cds::container::cuckoo::store_hash< true >
+     >::type
+ > opt_cuckoo_set;
+ \endcode
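+
+ A possible usage sketch for the \p my_cuckoo_set type declared above (an illustration only,
+ not a fragment of the library documentation; it relies on the mixed-type equal_to and
+ hash functors from the example):
+ \code
+ my_cuckoo_set theSet;
+
+ my_data d;
+ d.strKey = "apple";
+
+ bool bInserted = theSet.insert( d );                    // true - the key is new
+ bool bFound    = theSet.find( std::string("apple") );   // heterogeneous lookup by the key field
+ bool bErased   = theSet.erase( std::string("apple") );  // delete by the key field
+ // at this point bInserted == bFound == bErased == true and theSet.empty() == true
+ \endcode
+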
+
+ If we provide a \p compare functor instead of \p equal_to for \p my_data,
+ we get as a result a cuckoo set with an ordered probe set, which may improve
+ performance.
+ Example for an ordered vector-based probe-set:
+
+ \code
+ #include <cds/container/cuckoo_set.h>
+
+ // Data stored in cuckoo set
+ struct my_data
+ {
+     // key field
+     std::string strKey;
+
+     // other data
+     // ...
+ };
+
+ // Provide a compare functor for my_data since we want to use an ordered probe-set
+ struct my_data_compare {
+     int operator()( const my_data& d1, const my_data& d2 ) const
+     {
+         return d1.strKey.compare( d2.strKey );
+     }
+
+     int operator()( const my_data& d, const std::string& s ) const
+     {
+         return d.strKey.compare(s);
+     }
+
+     int operator()( const std::string& s, const my_data& d ) const
+     {
+         return s.compare( d.strKey );
+     }
+ };
+
+ // Provide two hash functors for my_data
+ struct hash1 {
+     size_t operator()(std::string const& s) const
+     {
+         return cds::opt::v::hash( s );
+     }
+     size_t operator()( my_data const& d ) const
+     {
+         return (*this)( d.strKey );
+     }
+ };
+
+ struct hash2: private hash1 {
+     size_t operator()(std::string const& s) const
+     {
+         size_t h = ~( hash1::operator()(s));
+         return ~h + 0x9e3779b9 + (h << 6) + (h >> 2);
+     }
+     size_t operator()( my_data const& d ) const
+     {
+         return (*this)( d.strKey );
+     }
+ };
+
+ // Declare type traits
+ // We use a vector of capacity 4 as the probe-set container and store hash values in the node
+ struct my_traits: public cds::container::cuckoo::type_traits
+ {
+     typedef my_data_compare               compare;
+     typedef std::tuple< hash1, hash2 >    hash;
+     typedef cds::container::cuckoo::vector<4> probeset_type;
+
+     static bool const store_hash = true;
+ };
+
+ // Declare CuckooSet type
+ typedef cds::container::CuckooSet< my_data, my_traits > my_cuckoo_set;
+
+ // Equivalent option-based declaration
+ typedef cds::container::CuckooSet< my_data,
+     cds::container::cuckoo::make_traits<
+         cds::opt::hash< std::tuple< hash1, hash2 > >
+         ,cds::opt::compare< my_data_compare >
+         ,cds::container::cuckoo::probeset_type< cds::container::cuckoo::vector<4> >
+         ,cds::container::cuckoo::store_hash< true >
+     >::type
+ > opt_cuckoo_set;
+ \endcode
+
+ */
+ template <typename T, typename Traits = cuckoo::type_traits>
+ class CuckooSet:
+#ifdef CDS_DOXYGEN_INVOKED
+     protected intrusive::CuckooSet< T, Traits >
+#else
+     protected details::make_cuckoo_set< T, Traits >::type
+#endif
+ {
+     //@cond
+     typedef details::make_cuckoo_set< T, Traits > maker;
+     typedef typename maker::type base_class;
+     //@endcond
+
+ public:
+     typedef T       value_type  ;   ///< value type stored in the container
+     typedef Traits  options     ;   ///< traits
+
+     typedef typename options::hash                  hash            ;   ///< hash functor tuple wrapped for internal use
+     typedef typename base_class::hash_tuple_type    hash_tuple_type ;   ///< Type of hash tuple
+
+     typedef typename base_class::mutex_policy       mutex_policy    ;   ///< Concurrent access policy, see cuckoo::type_traits::mutex_policy
+     typedef typename base_class::stat               stat            ;   ///< internal statistics type
+
+     static bool const c_isSorted = base_class::c_isSorted  ;   ///< whether the probe set should be ordered
+     static size_t const c_nArity = base_class::c_nArity    ;   ///< the arity of cuckoo hashing: the number of hash functors provided; minimum 2
+
+     typedef typename base_class::key_equal_to key_equal_to ;   ///< Key equality functor; used only for unordered probe-set
+
+     typedef typename base_class::key_comparator  key_comparator ;  ///< key comparing functor based on opt::compare and opt::less option setter.
Used only for ordered probe set + + typedef typename base_class::allocator allocator ; ///< allocator type used for internal bucket table allocations + + /// Node allocator type + typedef typename std::conditional< + std::is_same< typename options::node_allocator, opt::none >::value, + allocator, + typename options::node_allocator + >::type node_allocator; + + /// item counter type + typedef typename options::item_counter item_counter; + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef cds::details::Allocator< node_type, node_allocator > cxx_node_allocator; + //@endcond + + public: + static unsigned int const c_nDefaultProbesetSize = base_class::c_nDefaultProbesetSize ; ///< default probeset size + static size_t const c_nDefaultInitialSize = base_class::c_nDefaultInitialSize ; ///< default initial size + static unsigned int const c_nRelocateLimit = base_class::c_nRelocateLimit ; ///< Count of attempts to relocate before giving up + + protected: + //@cond + template + static node_type * alloc_node( Q const& v ) + { + return cxx_node_allocator().New( v ); + } +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_node_allocator().MoveNew( std::forward(args)... ); + } +# endif + + static void free_node( node_type * pNode ) + { + cxx_node_allocator().Delete( pNode ); + } + //@endcond + + protected: + //@cond + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_insert_functor { + void operator()( value_type& ) const + {} + }; + + struct empty_find_functor { + template + void operator()( node_type& item, Q& val ) const + {} + }; + + template + class insert_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + insert_wrapper( Func f ): base_class(f) {} + + void operator()( node_type& node ) + { + base_class::get()( node.m_val ); + } + }; + + template + class ensure_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + Q const& val; + + ensure_wrapper( Q const& v, Func f) : base_class(f), val(v) {} + + void operator()( bool bNew, node_type& item, node_type const& ) + { + base_class::get()( bNew, item.m_val, val ); + } + }; + + template + class find_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + find_wrapper( Func f ) + : base_class(f) + {} + + template + void operator()( node_type& item, Q& val ) + { + base_class::get()( item.m_val, val ); + } + }; +# endif + //@endcond + + public: + /// Default constructor + /** + Initial size = \ref c_nDefaultInitialSize + + Probe set size: + - \ref c_nDefaultProbesetSize if \p probeset_type is \p cuckoo::list + - \p Capacity if \p probeset_type is cuckoo::vector + + Probe set threshold = probe set size - 1 + */ + CuckooSet() + {} + + /// Constructs the set object with given probe set size and threshold + /** + If probe set type is cuckoo::vector vector + then \p nProbesetSize should be equal to vector's \p Capacity. + */ + CuckooSet( + size_t nInitialSize ///< Initial set size; if 0 - use default initial size \ref c_nDefaultInitialSize + , unsigned int nProbesetSize ///< probe set size + , unsigned int nProbesetThreshold = 0 ///< probe set threshold, nProbesetThreshold < nProbesetSize. 
If 0, nProbesetThreshold = nProbesetSize - 1 + ) + : base_class( nInitialSize, nProbesetSize, nProbesetThreshold ) + {} + + /// Constructs the set object with given hash functor tuple + /** + The probe set size and threshold are set as default, see CuckooSet() + */ + CuckooSet( + hash_tuple_type const& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : base_class( h ) + {} + + /// Constructs the set object with given probe set properties and hash functor tuple + /** + If probe set type is cuckoo::vector vector + then \p nProbesetSize should be equal to vector's \p Capacity. + */ + CuckooSet( + size_t nInitialSize ///< Initial set size; if 0 - use default initial size \ref c_nDefaultInitialSize + , unsigned int nProbesetSize ///< probe set size + , unsigned int nProbesetThreshold ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 + , hash_tuple_type const& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : base_class( nInitialSize, nProbesetSize, nProbesetThreshold, h ) + {} + +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + /// Constructs the set object with given hash functor tuple (move semantics) + /** + The probe set size and threshold are set as default, see CuckooSet() + */ + CuckooSet( + hash_tuple_type&& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : base_class( std::forward(h) ) + {} + + /// Constructs the set object with given probe set properties and hash functor tuple (move semantics) + /** + If probe set type is cuckoo::vector vector + then \p nProbesetSize should be equal to vector's \p Capacity. + */ + CuckooSet( + size_t nInitialSize ///< Initial set size; if 0 - use default initial size \ref c_nDefaultInitialSize + , unsigned int nProbesetSize ///< probe set size + , unsigned int nProbesetThreshold ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 + , hash_tuple_type&& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : base_class( nInitialSize, nProbesetSize, nProbesetThreshold, std::forward(h) ) + {} +# endif // ifdef CDS_MOVE_SEMANTICS_SUPPORT + + /// Destructor clears the set + ~CuckooSet() + { + clear(); + } + + public: + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain as minimum the complete key for the node. + The object of \ref value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( Q const& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return insert( val, []( value_type& ) {} ); +# else + return insert( val, empty_insert_functor() ); +# endif + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of new item . + + The functor signature is: + \code + void func( value_type& item ); + \endcode + where \p item is the item inserted. + + The type \p Q can differ from \ref value_type of items storing in the set. + Therefore, the \p value_type should be constructible from type \p Q. + + The user-defined functor is called only if the inserting is success. 
It can be passed by reference + using boost::ref + */ + template + bool insert( Q const& val, Func f ) + { + scoped_node_ptr pNode( alloc_node( val )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( base_class::insert( *pNode, [&f]( node_type& node ) { cds::unref(f)( node.m_val ); } )) +# else + insert_wrapper wrapper( f ); + if ( base_class::insert( *pNode, cds::ref(wrapper) )) +# endif + { + pNode.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr pNode( alloc_node( std::forward(args)... )); + if ( base_class::insert( *pNode )) { + pNode.release(); + return true; + } + return false; + } +# endif + + /// Ensures that the \p val exists in the set + /** + The operation performs inserting or changing data. + + If the \p val key not found in the set, then the new item created from \p val + is inserted into the set. Otherwise, the functor \p func is called with the item found. + The functor \p Func should be a function with signature: + \code + void func( bool bNew, value_type& item, const Q& val ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p ensure function + + The functor can change non-key fields of the \p item. + + You can pass \p func argument by value or by reference using boost::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p val key + already exists. + */ + template + std::pair ensure( Q const& val, Func func ) + { + scoped_node_ptr pNode( alloc_node( val )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair res = base_class::ensure( *pNode, + [&val,&func](bool bNew, node_type& item, node_type const& ){ cds::unref(func)( bNew, item.m_val, val ); } + ); +# else + ensure_wrapper wrapper( val, func ); + std::pair res = base_class::ensure( *pNode, cds::ref(wrapper) ); +# endif + if ( res.first && res.second ) + pNode.release(); + return res; + } + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_CuckooSet_erase + + Since the key of set's item type \ref value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The set item comparator should be able to compare the type \p value_type + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + node_type * pNode = base_class::erase( key ); + if ( pNode ) { + free_node( pNode ); + return true; + } + return false; + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_CuckooSet_erase "erase(Q const&)" + but \p pred is used for key comparing. + If cuckoo set is ordered, then \p Predicate should have the interface and semantics like \p std::less. + If cuckoo set is unordered, then \p Predicate should have the interface and semantics like \p std::equal_to. 
+ \p Predicate must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Predicate pred ) + { + node_type * pNode = base_class::erase_with( key, typename maker::template predicate_wrapper() ); + if ( pNode ) { + free_node( pNode ); + return true; + } + return false; + } + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_CuckooSet_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface is: + \code + struct functor { + void operator()(value_type const& val); + }; + \endcode + The functor can be passed by value or by reference using boost:ref + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { + node_type * pNode = base_class::erase( key ); + if ( pNode ) { + cds::unref(f)( pNode->m_val ); + free_node( pNode ); + return true; + } + return false; + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_CuckooSet_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + If you use ordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::less. + If you use unordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p Predicate must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Predicate pred, Func f ) + { + node_type * pNode = base_class::erase_with( key, typename maker::template predicate_wrapper() ); + if ( pNode ) { + cds::unref(f)( pNode->m_val ); + free_node( pNode ); + return true; + } + return false; + } + + /// Find the key \p val + /** \anchor cds_nonintrusive_CuckooSet_find_func + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + The type \p Q can differ from \ref value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( val, [&f](node_type& item, Q& v) { cds::unref(f)( item.m_val, v );}); +# else + find_wrapper wrapper(f); + return base_class::find( val, cds::ref(wrapper) ); +# endif + } + + /// Find the key \p val using \p pred predicate for comparing + /** + The function is an analog of \ref cds_nonintrusive_CuckooSet_find_func "find(Q&, Func)" + but \p pred is used for key comparison. + If you use ordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::less. + If you use unordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::equal_to. 
+ \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Predicate pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( val, typename maker::template predicate_wrapper(), + [&f](node_type& item, Q& v) { cds::unref(f)( item.m_val, v );}); +# else + find_wrapper wrapper(f); + return base_class::find_with( val, typename maker::template predicate_wrapper(), cds::ref(wrapper) ); +# endif + } + + /// Find the key \p val + /** \anchor cds_nonintrusive_CuckooSet_find_cfunc + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. + + The type \p Q can differ from \ref value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( val, [&f](node_type& item, Q const& v) { cds::unref(f)( item.m_val, v );}); +# else + find_wrapper wrapper(f); + return base_class::find( val, cds::ref(wrapper) ); +# endif + } + + /// Find the key \p val using \p pred predicate for comparing + /** + The function is an analog of \ref cds_nonintrusive_CuckooSet_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparison. + If you use ordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::less. + If you use unordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Predicate pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( val, typename maker::template predicate_wrapper(), + [&f](node_type& item, Q const& v) { cds::unref(f)( item.m_val, v );}); +# else + find_wrapper wrapper(f); + return base_class::find_with( val, typename maker::template predicate_wrapper(), cds::ref(wrapper) ); +# endif + } + + /// Find the key \p val + /** \anchor cds_nonintrusive_CuckooSet_find_val + + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \ref value_type. + */ + template + bool find( Q const& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( val, [](node_type&, Q const&) {}); +# else + return base_class::find( val, empty_find_functor()); +# endif + } + + /// Find the key \p val using \p pred predicate for comparing + /** + The function is an analog of \ref cds_nonintrusive_CuckooSet_find_val "find(Q const&)" + but \p pred is used for key comparison. + If you use ordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::less. + If you use unordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::equal_to. 
+ \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Predicate pred ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( val, typename maker::template predicate_wrapper(), [](node_type&, Q const&) {}); +# else + return base_class::find_with( val, typename maker::template predicate_wrapper(), empty_find_functor()); +# endif + } + + /// Clears the set + /** + The function erases all items from the set. + */ + void clear() + { + return base_class::clear_and_dispose( node_disposer() ); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + size_t size() const + { + return base_class::size(); + } + + /// Returns the size of hash table + /** + The hash table size is non-constant and can be increased via resizing. + */ + size_t bucket_count() const + { + return base_class::bucket_count(); + } + + /// Returns lock array size + size_t lock_count() const + { + return base_class::lock_count(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Returns const reference to mutex policy internal statistics + typename mutex_policy::statistics_type const& mutex_policy_statistics() const + { + return base_class::mutex_policy_statistics(); + } + + }; + +}} // namespace cds::container + +#endif //#ifndef __CDS_CONTAINER_CUCKOO_SET_H diff --git a/cds/container/details/guarded_ptr_cast.h b/cds/container/details/guarded_ptr_cast.h new file mode 100644 index 00000000..351e060d --- /dev/null +++ b/cds/container/details/guarded_ptr_cast.h @@ -0,0 +1,30 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_DETAILS_GUARDED_PTR_CAST_H +#define __CDS_CONTAINER_DETAILS_GUARDED_PTR_CAST_H +//@cond + +#include + +namespace cds { namespace container { namespace details { + + template + struct guarded_ptr_cast_set { + T * operator()(Node* pNode ) const CDS_NOEXCEPT + { + return &(pNode->m_Value); + } + }; + + template + struct guarded_ptr_cast_map { + T * operator()(Node* pNode ) const CDS_NOEXCEPT + { + return &(pNode->m_Data); + } + }; + +}}} // namespace cds::container::details + +//@endcond +#endif // #ifndef __CDS_CONTAINER_DETAILS_GUARDED_PTR_CAST_H diff --git a/cds/container/details/make_lazy_kvlist.h b/cds/container/details/make_lazy_kvlist.h new file mode 100644 index 00000000..89351f75 --- /dev/null +++ b/cds/container/details/make_lazy_kvlist.h @@ -0,0 +1,89 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_DETAILS_MAKE_LAZY_KVLIST_H +#define __CDS_CONTAINER_DETAILS_MAKE_LAZY_KVLIST_H + +#include + +namespace cds { namespace container { + + //@cond + namespace details { + + template + struct make_lazy_kvlist + { + typedef Traits original_type_traits; + + typedef GC gc; + typedef K key_type; + typedef T value_type; + typedef std::pair pair_type; + + struct node_type: public intrusive::lazy_list::node + { + pair_type m_Data; + + template + node_type( Q const& key ) + : m_Data( key, value_type() ) + {} + + template + explicit node_type( std::pair const& pair ) + : m_Data( pair ) + {} + + template + node_type( Q const& key, R const& value ) + : m_Data( key, value ) + {} + +# ifdef CDS_EMPLACE_SUPPORT + template + node_type( Ky&& key, Args&&... 
args ) + : m_Data( std::forward(key), std::move( value_type( std::forward(args)...))) + {} +# endif + }; + + typedef typename original_type_traits::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + + struct key_field_accessor { + key_type const& operator()( node_type const& pair ) + { + return pair.m_Data.first; + } + }; + + typedef typename opt::details::make_comparator< key_type, original_type_traits >::type key_comparator; + + template + struct less_wrapper { + typedef cds::details::compare_wrapper< node_type, cds::opt::details::make_comparator_from_less, key_field_accessor > type; + }; + + struct type_traits: public original_type_traits + { + typedef intrusive::lazy_list::base_hook< opt::gc > hook; + typedef node_deallocator disposer; + typedef cds::details::compare_wrapper< node_type, key_comparator, key_field_accessor > compare; + }; + + typedef intrusive::LazyList type; + }; + } // namespace details + //@endcond + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_DETAILS_MAKE_LAZY_KVLIST_H diff --git a/cds/container/details/make_lazy_list.h b/cds/container/details/make_lazy_list.h new file mode 100644 index 00000000..30b30c6f --- /dev/null +++ b/cds/container/details/make_lazy_list.h @@ -0,0 +1,79 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_DETAILS_MAKE_LAZY_LIST_H +#define __CDS_CONTAINER_DETAILS_MAKE_LAZY_LIST_H + +#include + +namespace cds { namespace container { + + //@cond + namespace details { + + template + struct make_lazy_list + { + typedef GC gc; + typedef T value_type; + typedef Traits original_type_traits; + + struct node_type : public intrusive::lazy_list::node + { + value_type m_Value; + + node_type() + {} + + template + node_type( Q const& v ) + : m_Value(v) + {} +# ifdef CDS_EMPLACE_SUPPORT + template + node_type( Args&&... args ) + : m_Value( std::forward(args)...) 
+ {} +# endif + }; + + typedef typename original_type_traits::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + + typedef typename opt::details::make_comparator< value_type, original_type_traits >::type key_comparator; + + struct value_accessor { + value_type const & operator()( node_type const & node ) const + { + return node.m_Value; + } + }; + + template + struct less_wrapper { + typedef cds::details::compare_wrapper< node_type, cds::opt::details::make_comparator_from_less, value_accessor > type; + }; + + struct type_traits: public original_type_traits + { + typedef intrusive::lazy_list::base_hook< opt::gc > hook; + typedef node_deallocator disposer; + + typedef cds::details::compare_wrapper< node_type, key_comparator, value_accessor > compare; + }; + + typedef intrusive::LazyList type; + }; + } // namespace details + //@endcond + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_DETAILS_MAKE_MICHAEL_LIST_H diff --git a/cds/container/details/make_michael_kvlist.h b/cds/container/details/make_michael_kvlist.h new file mode 100644 index 00000000..9d0bcf23 --- /dev/null +++ b/cds/container/details/make_michael_kvlist.h @@ -0,0 +1,89 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_DETAILS_MAKE_MICHAEL_KVLIST_H +#define __CDS_CONTAINER_DETAILS_MAKE_MICHAEL_KVLIST_H + +#include + +namespace cds { namespace container { + + //@cond + namespace details { + + template + struct make_michael_kvlist + { + typedef Traits original_type_traits; + + typedef GC gc; + typedef K key_type; + typedef T value_type; + typedef std::pair pair_type; + + struct node_type: public intrusive::michael_list::node + { + pair_type m_Data; + + template + node_type( Q const& key ) + : m_Data( key, value_type() ) + {} + + template + explicit node_type( std::pair const& pair ) + : m_Data( pair ) + {} + + template + node_type( Q const& key, R const& value ) + : m_Data( key, value ) + {} + +# ifdef CDS_EMPLACE_SUPPORT + template< typename Ky, typename... Args> + node_type( Ky&& key, Args&&... 
args ) + : m_Data( std::forward(key), std::move( value_type( std::forward(args)...))) + {} +# endif + }; + + typedef typename original_type_traits::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + + struct key_field_accessor { + key_type const& operator()( node_type const& pair ) + { + return pair.m_Data.first; + } + }; + + typedef typename opt::details::make_comparator< key_type, original_type_traits >::type key_comparator; + + template + struct less_wrapper { + typedef cds::details::compare_wrapper< node_type, cds::opt::details::make_comparator_from_less, key_field_accessor > type; + }; + + struct type_traits: public original_type_traits + { + typedef intrusive::michael_list::base_hook< opt::gc > hook; + typedef node_deallocator disposer; + typedef cds::details::compare_wrapper< node_type, key_comparator, key_field_accessor > compare; + }; + + typedef intrusive::MichaelList type; + }; + } // namespace details + //@endcond + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_DETAILS_MAKE_MICHAEL_KVLIST_H diff --git a/cds/container/details/make_michael_list.h b/cds/container/details/make_michael_list.h new file mode 100644 index 00000000..8d65e28b --- /dev/null +++ b/cds/container/details/make_michael_list.h @@ -0,0 +1,82 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_DETAILS_MAKE_MICHAEL_LIST_H +#define __CDS_CONTAINER_DETAILS_MAKE_MICHAEL_LIST_H + +#include + +namespace cds { namespace container { + + //@cond + namespace details { + + template + struct make_michael_list + { + typedef GC gc; + typedef T value_type; + + struct node_type : public intrusive::michael_list::node + { + value_type m_Value; + + node_type() + {} + + template + node_type( Q const& v ) + : m_Value(v) + {} + +# ifdef CDS_EMPLACE_SUPPORT + template + node_type( Args&&... args ) + : m_Value( std::forward(args)... 
) + {} +# endif + }; + + typedef Traits original_type_traits; + + typedef typename original_type_traits::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + + typedef typename opt::details::make_comparator< value_type, original_type_traits >::type key_comparator; + + struct value_accessor + { + value_type const & operator()( node_type const& node ) const + { + return node.m_Value; + } + }; + + template + struct less_wrapper { + typedef cds::details::compare_wrapper< node_type, cds::opt::details::make_comparator_from_less, value_accessor > type; + }; + + struct type_traits: public original_type_traits + { + typedef intrusive::michael_list::base_hook< opt::gc > hook; + typedef node_deallocator disposer; + + typedef cds::details::compare_wrapper< node_type, key_comparator, value_accessor > compare; + }; + + typedef intrusive::MichaelList type; + }; + } // namespace details + //@endcond + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_DETAILS_MAKE_MICHAEL_LIST_H diff --git a/cds/container/details/make_skip_list_map.h b/cds/container/details/make_skip_list_map.h new file mode 100644 index 00000000..b91c9983 --- /dev/null +++ b/cds/container/details/make_skip_list_map.h @@ -0,0 +1,131 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_DETAILS_MAKE_SKIP_LIST_MAP_H +#define __CDS_CONTAINER_DETAILS_MAKE_SKIP_LIST_MAP_H + +#include +#include + +//@cond +namespace cds { namespace container { namespace details { + + template + struct make_skip_list_map + { + typedef GC gc; + typedef K key_type; + typedef T mapped_type; + typedef std::pair< key_type const, mapped_type> value_type; + typedef Traits type_traits; + + typedef cds::intrusive::skip_list::node< gc > intrusive_node_type; + struct node_type: public intrusive_node_type + { + typedef intrusive_node_type base_class; + typedef typename base_class::atomic_marked_ptr atomic_marked_ptr; + typedef value_type stored_value_type; + + value_type m_Value; + //atomic_marked_ptr m_arrTower[] ; // allocated together with node_type in single memory block + + template + node_type( unsigned int nHeight, atomic_marked_ptr * pTower, Q const& key ) + : m_Value( std::make_pair( key, mapped_type() )) + { + init_tower( nHeight, pTower ); + } + + template + node_type( unsigned int nHeight, atomic_marked_ptr * pTower, Q const& key, U const& val ) + : m_Value( std::make_pair( key, val )) + { + init_tower( nHeight, pTower ); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + node_type( unsigned int nHeight, atomic_marked_ptr * pTower, Q&& key, Args&&... args ) + : m_Value( std::forward(key), std::move( mapped_type( std::forward(args)... 
))) + { + init_tower( nHeight, pTower ); + } +# endif + + private: + node_type() ; // no default ctor + + void init_tower( unsigned int nHeight, atomic_marked_ptr * pTower ) + { + if ( nHeight > 1 ) { + new (pTower) atomic_marked_ptr[ nHeight - 1 ]; + base_class::make_tower( nHeight, pTower ); + } + } + }; + + class node_allocator: public skip_list::details::node_allocator< node_type, type_traits> + { + typedef skip_list::details::node_allocator< node_type, type_traits> base_class; + public: + template + node_type * New( unsigned int nHeight, Q const& key ) + { + return base_class::New( nHeight, key ); + } + template + node_type * New( unsigned int nHeight, Q const& key, U const& val ) + { + unsigned char * pMem = base_class::alloc_space( nHeight ); + return new( pMem ) + node_type( nHeight, + nHeight > 1 ? reinterpret_cast( pMem + base_class::c_nNodeSize ) + : null_ptr(), + key, val ); + } +# ifdef CDS_EMPLACE_SUPPORT + template + node_type * New( unsigned int nHeight, Args&&... args ) + { + unsigned char * pMem = base_class::alloc_space( nHeight ); + return new( pMem ) + node_type( nHeight, nHeight > 1 ? reinterpret_cast( pMem + base_class::c_nNodeSize ) + : null_ptr(), + std::forward(args)... ); + } +# endif + }; + + struct node_deallocator { + void operator ()( node_type * pNode ) + { + node_allocator().Delete( pNode ); + } + }; + + typedef skip_list::details::dummy_node_builder dummy_node_builder; + + struct key_accessor + { + key_type const & operator()( node_type const& node ) const + { + return node.m_Value.first; + } + }; + typedef typename opt::details::make_comparator< key_type, type_traits >::type key_comparator; + + class intrusive_type_traits: public cds::intrusive::skip_list::make_traits< + cds::opt::type_traits< type_traits > + ,cds::intrusive::opt::hook< intrusive::skip_list::base_hook< cds::opt::gc< gc > > > + ,cds::intrusive::opt::disposer< node_deallocator > + ,cds::intrusive::skip_list::internal_node_builder< dummy_node_builder > + ,cds::opt::compare< cds::details::compare_wrapper< node_type, key_comparator, key_accessor > > + >::type + {}; + + typedef cds::intrusive::SkipListSet< gc, node_type, intrusive_type_traits> type; + }; + +}}} // namespace cds::container::details +//@endcond + +#endif // __CDS_CONTAINER_DETAILS_MAKE_SKIP_LIST_MAP_H diff --git a/cds/container/details/make_skip_list_set.h b/cds/container/details/make_skip_list_set.h new file mode 100644 index 00000000..e88ee5c0 --- /dev/null +++ b/cds/container/details/make_skip_list_set.h @@ -0,0 +1,94 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_DETAILS_MAKE_SKIP_LIST_SET_H +#define __CDS_CONTAINER_DETAILS_MAKE_SKIP_LIST_SET_H + +#include +#include + +//@cond +namespace cds { namespace container { namespace details { + + template + struct make_skip_list_set + { + typedef GC gc; + typedef T value_type; + typedef Traits type_traits; + + typedef cds::intrusive::skip_list::node< gc > intrusive_node_type; + struct node_type: public intrusive_node_type + { + typedef intrusive_node_type base_class; + typedef typename base_class::atomic_marked_ptr atomic_marked_ptr; + typedef value_type stored_value_type; + + value_type m_Value; + //atomic_marked_ptr m_arrTower[] ; // allocated together with node_type in single memory block + + template + node_type( unsigned int nHeight, atomic_marked_ptr * pTower, Q const& v ) + : m_Value(v) + { + if ( nHeight > 1 ) { + new (pTower) atomic_marked_ptr[ nHeight - 1 ]; + base_class::make_tower( nHeight, pTower ); + } + } + +# ifdef CDS_EMPLACE_SUPPORT + template + node_type( unsigned 
int nHeight, atomic_marked_ptr * pTower, Q&& q, Args&&... args ) + : m_Value( std::forward(q), std::forward(args)... ) + { + if ( nHeight > 1 ) { + new (pTower) atomic_marked_ptr[ nHeight - 1 ]; + base_class::make_tower( nHeight, pTower ); + } + } +# endif + + private: + node_type() ; // no default ctor + }; + + typedef skip_list::details::node_allocator< node_type, type_traits> node_allocator; + + struct node_deallocator { + void operator ()( node_type * pNode ) + { + node_allocator().Delete( pNode ); + } + }; + + typedef skip_list::details::dummy_node_builder dummy_node_builder; + + struct value_accessor + { + value_type const& operator()( node_type const& node ) const + { + return node.m_Value; + } + }; + typedef typename opt::details::make_comparator< value_type, type_traits >::type key_comparator; + + template + struct less_wrapper { + typedef cds::details::compare_wrapper< node_type, cds::opt::details::make_comparator_from_less, value_accessor > type; + }; + + class intrusive_type_traits: public cds::intrusive::skip_list::make_traits< + cds::opt::type_traits< type_traits > + ,cds::intrusive::opt::hook< intrusive::skip_list::base_hook< cds::opt::gc< gc > > > + ,cds::intrusive::opt::disposer< node_deallocator > + ,cds::intrusive::skip_list::internal_node_builder< dummy_node_builder > + ,cds::opt::compare< cds::details::compare_wrapper< node_type, key_comparator, value_accessor > > + >::type + {}; + + typedef cds::intrusive::SkipListSet< gc, node_type, intrusive_type_traits> type; + }; +}}} // namespace cds::container::details +//@endcond + +#endif //#ifndef __CDS_CONTAINER_DETAILS_MAKE_SKIP_LIST_SET_H diff --git a/cds/container/details/make_split_list_set.h b/cds/container/details/make_split_list_set.h new file mode 100644 index 00000000..1a5d02e3 --- /dev/null +++ b/cds/container/details/make_split_list_set.h @@ -0,0 +1,239 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_H +#define __CDS_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_H + +#include +#include +#include + +//@cond +namespace cds { namespace container { + + // Forward declaration + struct michael_list_tag; + struct lazy_list_tag; + + namespace details { + +#ifdef __CDS_CONTAINER_MICHAEL_LIST_BASE_H + // if michael_list included + + template + struct make_split_list_set< GC, T, michael_list_tag, Traits > + { + typedef GC gc; + typedef T value_type; + typedef Traits original_type_traits; + + typedef typename cds::opt::select_default< + typename original_type_traits::ordered_list_traits, + cds::container::michael_list::type_traits + >::type original_ordered_list_traits; + + typedef cds::intrusive::split_list::node< cds::intrusive::michael_list::node > primary_node_type; + struct node_type: public primary_node_type + { + value_type m_Value; + + template + explicit node_type( Q const& v ) + : m_Value(v) + {} +# ifdef CDS_EMPLACE_SUPPORT + template + explicit node_type( Q&& q, Args&&... args ) + : m_Value( std::forward(q), std::forward(args)... 
) + {} +# endif + private: + node_type() ; // no default ctor + }; + + typedef typename cds::opt::select_default< + typename original_type_traits::ordered_list_traits, + typename original_type_traits::allocator, + typename cds::opt::select_default< + typename original_type_traits::ordered_list_traits::allocator, + typename original_type_traits::allocator + >::type + >::type node_allocator_; + + typedef typename node_allocator_::template rebind::other node_allocator_type; + + typedef cds::details::Allocator< node_type, node_allocator_type > cxx_node_allocator; + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_node_allocator().Delete( pNode ); + } + }; + + typedef typename opt::details::make_comparator< value_type, original_ordered_list_traits >::type key_comparator; + + typedef typename original_type_traits::key_accessor key_accessor; + + struct value_accessor + { + typename key_accessor::key_type const& operator()( node_type const& node ) const + { + return key_accessor()(node.m_Value); + } + }; + + template + struct predicate_wrapper { + typedef cds::details::predicate_wrapper< node_type, Predicate, value_accessor > type; + }; + + struct ordered_list_traits: public original_ordered_list_traits + { + typedef cds::intrusive::michael_list::base_hook< + opt::gc + > hook; + typedef atomicity::empty_item_counter item_counter; + typedef node_deallocator disposer; + typedef cds::details::compare_wrapper< node_type, key_comparator, value_accessor > compare; + }; + + struct type_traits: public original_type_traits + { + struct hash: public original_type_traits::hash + { + typedef typename original_type_traits::hash base_class; + + size_t operator()(node_type const& v ) const + { + return base_class::operator()( key_accessor()( v.m_Value ) ); + } + template + size_t operator()( Q const& k ) const + { + return base_class::operator()( k ); + } + //using base_class::operator(); + }; + }; + + typedef cds::intrusive::MichaelList< gc, node_type, ordered_list_traits > ordered_list; + typedef cds::intrusive::SplitListSet< gc, ordered_list, type_traits > type; + }; +#endif // ifdef __CDS_CONTAINER_MICHAEL_LIST_BASE_H + +#ifdef __CDS_CONTAINER_LAZY_LIST_BASE_H + // if lazy_list included + template + struct make_split_list_set< GC, T, lazy_list_tag, Traits > + { + typedef GC gc; + typedef T value_type; + typedef Traits original_type_traits; + + typedef typename cds::opt::select_default< + typename original_type_traits::ordered_list_traits, + cds::container::lazy_list::type_traits + >::type original_ordered_list_traits; + + typedef typename cds::opt::select_default< + typename original_ordered_list_traits::lock_type, + typename cds::container::lazy_list::type_traits::lock_type + >::type lock_type; + + typedef cds::intrusive::split_list::node< cds::intrusive::lazy_list::node > primary_node_type; + struct node_type: public primary_node_type + { + value_type m_Value; + + template + explicit node_type( const Q& v ) + : m_Value(v) + {} + +# ifdef CDS_EMPLACE_SUPPORT + template + explicit node_type( Q&& q, Args&&... args ) + : m_Value( std::forward(q), std::forward(args)... 
) + {} +# endif + private: + node_type() ; // no default ctor + }; + + typedef typename cds::opt::select_default< + typename original_type_traits::ordered_list_traits, + typename original_type_traits::allocator, + typename cds::opt::select_default< + typename original_type_traits::ordered_list_traits::allocator, + typename original_type_traits::allocator + >::type + >::type node_allocator_; + + typedef typename node_allocator_::template rebind::other node_allocator_type; + + typedef cds::details::Allocator< node_type, node_allocator_type > cxx_node_allocator; + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_node_allocator().Delete( pNode ); + } + }; + + typedef typename opt::details::make_comparator< value_type, original_ordered_list_traits >::type key_comparator; + + typedef typename original_type_traits::key_accessor key_accessor; + + struct value_accessor + { + typename key_accessor::key_type const & operator()( node_type const & node ) const + { + return key_accessor()(node.m_Value); + } + }; + + template + struct predicate_wrapper { + typedef cds::details::predicate_wrapper< node_type, Predicate, value_accessor > type; + }; + + struct ordered_list_traits: public original_ordered_list_traits + { + typedef cds::intrusive::lazy_list::base_hook< + opt::gc + ,opt::lock_type< lock_type > + > hook; + typedef atomicity::empty_item_counter item_counter; + typedef node_deallocator disposer; + typedef cds::details::compare_wrapper< node_type, key_comparator, value_accessor > compare; + }; + + struct type_traits: public original_type_traits + { + struct hash: public original_type_traits::hash + { + typedef typename original_type_traits::hash base_class; + + size_t operator()(node_type const& v ) const + { + return base_class::operator()( key_accessor()( v.m_Value )); + } + template + size_t operator()( Q const& k ) const + { + return base_class::operator()( k ); + } + //using base_class::operator(); + }; + }; + + typedef cds::intrusive::LazyList< gc, node_type, ordered_list_traits > ordered_list; + typedef cds::intrusive::SplitListSet< gc, ordered_list, type_traits > type; + }; +#endif // ifdef __CDS_CONTAINER_LAZY_LIST_BASE_H + + } // namespace details +}} // namespace cds::container +//@endcond + +#endif // #ifndef __CDS_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_H diff --git a/cds/container/ellen_bintree_base.h b/cds/container/ellen_bintree_base.h new file mode 100644 index 00000000..80d8e6df --- /dev/null +++ b/cds/container/ellen_bintree_base.h @@ -0,0 +1,374 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_ELLEN_BINTREE_BASE_H +#define __CDS_CONTAINER_ELLEN_BINTREE_BASE_H + +#include +#include +#include +#include + + +namespace cds { namespace container { + /// EllenBinTree related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace ellen_bintree { + +#ifdef CDS_DOXYGEN_INVOKED + /// Typedef for cds::intrusive::ellen_bintree::update_desc + typedef cds::intrusive::ellen_bintree::update_desc update_desc; + + /// Typedef for cds::intrusive::ellen_bintree::internal_node + typedef cds::intrusive::ellen_bintree::internal_node internal_node; + + /// Typedef for cds::intrusive::ellen_bintree::key_extractor + typedef cds::intrusive::ellen_bintree::key_extractor key_extractor; + + /// Typedef for cds::intrusive::ellen_bintree::update_desc_allocator + typedef cds::intrusive::ellen_bintree::update_desc_allocator update_desc_allocator; + + /// Typedef for cds::intrusive::ellen_bintree::stat + typedef cds::intrusive::ellen_bintree::stat stat; + + /// Typedef for 
cds::intrusive::ellen_bintree::empty_stat + typedef cds::intrusive::ellen_bintree::empty_stat empty_stat; +#else + using cds::intrusive::ellen_bintree::update_desc; + using cds::intrusive::ellen_bintree::internal_node; + using cds::intrusive::ellen_bintree::key_extractor; + using cds::intrusive::ellen_bintree::update_desc_allocator; + using cds::intrusive::ellen_bintree::stat; + using cds::intrusive::ellen_bintree::empty_stat; + using cds::intrusive::ellen_bintree::node_types; +#endif + + /// EllenBinTree leaf node + template + struct node: public cds::intrusive::ellen_bintree::node + { + typedef T value_type ; ///< Value type + + T m_Value ; ///< Value + + /// Default ctor + node() + {} + + /// Initializing ctor + template + node(Q const& v) + : m_Value(v) + {} + +#ifdef CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT + /// Copy constructor + template + node( Args const&... args) + : m_Value( args... ) + {} + +#ifdef CDS_RVALUE_SUPPORT + /// Move constructor + template + node( Args&&... args) + : m_Value( std::forward(args)... ) + {} +#endif // CDS_RVALUE_SUPPORT +#endif // CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT + }; + + /// EllenBinTreeMap leaf node + template + struct map_node: public cds::intrusive::ellen_bintree::node< GC > + { + typedef Key key_type ; ///< key type + typedef T mapped_type ; ///< value type + typedef std::pair value_type ; ///< key-value pair stored in the map + + value_type m_Value ; ///< Key-value pair stored in map leaf node + + /// Initializes key field, value if default-constructed + template + map_node( K const& key ) + : m_Value( std::make_pair( key_type(key), mapped_type() )) + {} + + /// Initializes key and value fields + template + map_node( K const& key, Q const& v ) + : m_Value( std::make_pair(key_type(key), mapped_type(v) )) + {} + }; + + /// Type traits for EllenBinTreeSet, EllenBinTreeMap and EllenBinTreePriorityQueue + struct type_traits + { + /// Key extracting functor (only for EllenBinTreeSet) + /** + You should explicit define a valid functor. + The functor has the following prototype: + \code + struct key_extractor { + void operator ()( Key& dest, T const& src ); + }; + \endcode + It should initialize \p dest key from \p src data. + The functor is used to initialize internal nodes. + */ + typedef opt::none key_extractor; + + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + + See cds::opt::compare option description for functor interface. + + You should provide \p compare or \p less functor. + See \ref cds_container_EllenBinTreeSet_rcu_less "predicate requirements". + */ + typedef opt::none compare; + + /// Specifies binary predicate used for key compare. + /** + See cds::opt::less option description for predicate interface. + + You should provide \p compare or \p less functor. + See \ref cds_container_EllenBinTreeSet_rcu_less "predicate requirements". + */ + typedef opt::none less; + + /// Item counter + /** + The type for item counting feature (see cds::opt::item_counter). + Default is no item counter (\ref atomicity::empty_item_counter) + */ + typedef atomicity::empty_item_counter item_counter; + + /// C++ memory ordering model + /** + List of available memory ordering see opt::memory_model + */ + typedef opt::v::relaxed_ordering memory_model; + + /// Allocator for update descriptors + /** + The allocator type is used for \ref update_desc. + + Update descriptor is helping data structure with short lifetime and it is good candidate + for pooling. 
The number of simultaneously existing descriptors is a small number
+                limited by the number of threads working with the tree.
+                Therefore, a bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue
+                is a good choice for the free-list of update descriptors,
+                see cds::memory::vyukov_queue_pool free-list implementation.
+
+                Also notice that the size of an update descriptor does not depend on the type of data
+                stored in the tree, so a single free-list object can be used for several \p EllenBinTree objects.
+            */
+            typedef CDS_DEFAULT_ALLOCATOR           update_desc_allocator;
+
+            /// Allocator for internal nodes
+            /**
+                The allocator type is used for \ref internal_node.
+            */
+            typedef CDS_DEFAULT_ALLOCATOR           node_allocator;
+
+            /// Allocator for leaf nodes
+            /**
+                Each leaf node contains data stored in the container.
+            */
+            typedef CDS_DEFAULT_ALLOCATOR           allocator;
+
+            /// Internal statistics
+            /**
+                Possible types: ellen_bintree::empty_stat (the default), ellen_bintree::stat or any
+                other with interface like \p %stat.
+            */
+            typedef empty_stat                      stat;
+
+            /// RCU deadlock checking policy (only for RCU-based EllenBinTreeXXX classes)
+            /**
+                For the list of available options, see opt::rcu_check_deadlock
+            */
+            typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock;
+
+            /// Key copy policy (for EllenBinTreeMap)
+            /**
+                The key copy policy defines a functor to copy leaf node's key to internal node.
+                This policy is used only in EllenBinTreeMap. By default, assignment operator is used.
+
+                The copy functor interface is:
+                \code
+                struct copy_functor {
+                    void operator()( Key& dest, Key const& src );
+                };
+                \endcode
+            */
+            typedef opt::none                       copy_policy;
+        };
+
+
+        /// Metafunction converting option list to EllenBinTreeSet traits
+        /**
+            This is a wrapper for cds::opt::make_options< type_traits, Options...>
+            For the \p Options list, see \ref cds_container_EllenBinTreeSet "EllenBinTreeSet".
+        */
+        template
+        struct make_set_traits {
+#   ifdef CDS_DOXYGEN_INVOKED
+            typedef implementation_defined type ;   ///< Metafunction result
+#   else
+            typedef typename cds::opt::make_options<
+                typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS11 >::type
+                ,CDS_OPTIONS11
+            >::type   type;
+#   endif
+        };
+
+        /// Metafunction converting option list to EllenBinTreeMap traits
+        /**
+            This is a wrapper for cds::opt::make_options< type_traits, Options...>
+            For the \p Options list, see \ref cds_container_EllenBinTreeMap "EllenBinTreeMap".
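+            A usage sketch (illustrative only; the particular options shown here are assumptions,
+            not requirements of the metafunction):
+            \code
+            // build map traits from an option list instead of declaring a traits class
+            typedef cds::container::ellen_bintree::make_map_traits<
+                cds::opt::less< std::less<int> >                        // ordering predicate (int keys assumed)
+                ,cds::opt::item_counter< cds::atomicity::item_counter > // enable item counting
+            >::type my_map_traits;
+            \endcode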
+ */ + template + struct make_map_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS11 >::type + ,CDS_OPTIONS11 + >::type type; +# endif + }; + + //@cond + namespace details { + + template < class GC, typename Key, typename T, class Traits> + struct make_ellen_bintree_set + { + typedef GC gc; + typedef Key key_type; + typedef T value_type; + typedef Traits original_type_traits; + + typedef node< gc, value_type > leaf_node; + + struct intrusive_key_extractor + { + void operator()( key_type& dest, leaf_node const& src ) const + { + typename original_type_traits::key_extractor()( dest, src.m_Value ); + } + }; + + struct value_accessor + { + value_type const& operator()( leaf_node const& node ) const + { + return node.m_Value; + } + }; + + typedef typename cds::opt::details::make_comparator< value_type, original_type_traits, false >::type key_comparator; + + typedef cds::details::Allocator< leaf_node, typename original_type_traits::allocator> cxx_leaf_node_allocator; + struct leaf_deallocator + { + void operator()( leaf_node * p ) const + { + cxx_leaf_node_allocator().Delete( p ); + } + }; + + struct intrusive_type_traits: public original_type_traits + { + typedef cds::intrusive::ellen_bintree::base_hook< cds::opt::gc< gc > > hook; + typedef intrusive_key_extractor key_extractor; + typedef leaf_deallocator disposer; + typedef cds::details::compare_wrapper< leaf_node, key_comparator, value_accessor > compare; + }; + + // Metafunction result + typedef cds::intrusive::EllenBinTree< gc, key_type, leaf_node, intrusive_type_traits > type; + }; + + template < class GC, typename Key, typename T, class Traits> + struct make_ellen_bintree_map + { + typedef GC gc; + typedef Key key_type; + typedef T mapped_type; + typedef map_node< gc, key_type, mapped_type > leaf_node; + typedef typename leaf_node::value_type value_type; + + typedef Traits original_type_traits; + + struct assignment_copy_policy { + void operator()( key_type& dest, key_type const& src ) + { + dest = src; + } + }; + typedef typename std::conditional< + std::is_same< typename original_type_traits::copy_policy, opt::none >::value, + assignment_copy_policy, + typename original_type_traits::copy_policy + >::type copy_policy; + + struct intrusive_key_extractor + { + void operator()( key_type& dest, leaf_node const& src ) const + { + copy_policy()( dest, src.m_Value.first ); + } + }; + + struct key_accessor + { + key_type const& operator()( leaf_node const& node ) const + { + return node.m_Value.first; + } + }; + + typedef typename cds::opt::details::make_comparator< key_type, original_type_traits, false >::type key_comparator; + + typedef cds::details::Allocator< leaf_node, typename original_type_traits::allocator> cxx_leaf_node_allocator; + struct leaf_deallocator + { + void operator()( leaf_node * p ) const + { + cxx_leaf_node_allocator().Delete( p ); + } + }; + + struct intrusive_type_traits: public original_type_traits + { + typedef cds::intrusive::ellen_bintree::base_hook< cds::opt::gc< gc > > hook; + typedef intrusive_key_extractor key_extractor; + typedef leaf_deallocator disposer; + typedef cds::details::compare_wrapper< leaf_node, key_comparator, key_accessor > compare; + }; + + // Metafunction result + typedef cds::intrusive::EllenBinTree< gc, key_type, leaf_node, intrusive_type_traits > type; + }; + + } // namespace details + //@endcond + } // namespace ellen_bintree + 
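+    // Illustrative sketch only: a class-based traits declaration for EllenBinTreeSet.
+    // It derives from ellen_bintree::type_traits and supplies the mandatory key_extractor
+    // plus an ordering predicate. The types my_data (with field nKey) and my_less are
+    // hypothetical user-defined types, not part of the library:
+    //
+    //      struct my_set_traits: public cds::container::ellen_bintree::type_traits
+    //      {
+    //          struct key_extractor {
+    //              void operator()( int& dest, my_data const& src ) const { dest = src.nKey; }
+    //          };
+    //          typedef my_less less;   // must accept my_data and int in any combination
+    //      };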
+ // Forward declarations + //@cond + template < class GC, typename Key, typename T, class Traits = ellen_bintree::type_traits > + class EllenBinTreeSet; + + template < class GC, typename Key, typename T, class Traits = ellen_bintree::type_traits > + class EllenBinTreeMap; + //@endcond + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_ELLEN_BINTREE_BASE_H diff --git a/cds/container/ellen_bintree_map_hp.h b/cds/container/ellen_bintree_map_hp.h new file mode 100644 index 00000000..22c365ff --- /dev/null +++ b/cds/container/ellen_bintree_map_hp.h @@ -0,0 +1,9 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_ELLEN_BINTREE_MAP_HP_H +#define __CDS_CONTAINER_ELLEN_BINTREE_MAP_HP_H + +#include +#include + +#endif // #ifndef __CDS_CONTAINER_ELLEN_BINTREE_MAP_HP_H diff --git a/cds/container/ellen_bintree_map_impl.h b/cds/container/ellen_bintree_map_impl.h new file mode 100644 index 00000000..d42c8619 --- /dev/null +++ b/cds/container/ellen_bintree_map_impl.h @@ -0,0 +1,677 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_ELLEN_BINTREE_MAP_IMPL_H +#define __CDS_CONTAINER_ELLEN_BINTREE_MAP_IMPL_H + +#include +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Map based on Ellen's et al binary search tree + /** @ingroup cds_nonintrusive_map + @ingroup cds_nonintrusive_tree + @anchor cds_container_EllenBinTreeMap + + Source: + - [2010] F.Ellen, P.Fatourou, E.Ruppert, F.van Breugel "Non-blocking Binary Search Tree" + + %EllenBinTreeMap is an unbalanced leaf-oriented binary search tree that implements the map + abstract data type. Nodes maintains child pointers but not parent pointers. + Every internal node has exactly two children, and all data of type std::pair + currently in the tree are stored in the leaves. Internal nodes of the tree are used to direct \p find + operation along the path to the correct leaf. The keys (of \p Key type) stored in internal nodes + may or may not be in the map. + Unlike \ref cds_container_EllenBinTreeSet "EllenBinTreeSet" keys are not a part of \p T type. + The map can be represented as a set containing std::pair< Key const, T> values. + + Due to \p extract_min and \p extract_max member functions the \p %EllenBinTreeMap can act as + a priority queue. In this case you should provide unique compound key, for example, + the priority value plus some uniformly distributed random value. + + @warning Recall the tree is unbalanced. The complexity of operations is O(log N) + for uniformly distributed random keys, but in worst case the complexity is O(N). + + @note In the current implementation we do not use helping technique described in original paper. + So, the current implementation is near to fine-grained lock-based tree. + Helping will be implemented in future release + + Template arguments : + - \p GC - safe memory reclamation (i.e. light-weight garbage collector) type, like cds::gc::HP, cds::gc::PTB + Note that cds::gc::HRC is not supported. + - \p Key - key type + - \p T - value type to be stored in tree's leaf nodes. + - \p Traits - type traits. See ellen_bintree::type_traits for explanation. + + It is possible to declare option-based tree with ellen_bintree::make_map_traits metafunction + instead of \p Traits template argument. + Template argument list \p Options of ellen_bintree::make_map_traits metafunction are: + - opt::compare - key compare functor. No default functor is provided. + If the option is not specified, \p %opt::less is used. + - opt::less - specifies binary predicate used for key compare. 
At least \p %opt::compare or \p %opt::less should be defined. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - opt::allocator - the allocator used for \ref ellen_bintree::map_node "leaf nodes" which contains data. + Default is \ref CDS_DEFAULT_ALLOCATOR. + - opt::node_allocator - the allocator used for \ref ellen_bintree::internal_node "internal nodes". + Default is \ref CDS_DEFAULT_ALLOCATOR. + - ellen_bintree::update_desc_allocator - an allocator of \ref ellen_bintree::update_desc "update descriptors", + default is \ref CDS_DEFAULT_ALLOCATOR. + Note that update descriptor is helping data structure with short lifetime and it is good candidate for pooling. + The number of simultaneously existing descriptors is a relatively small number limited the number of threads + working with the tree and GC buffer size. + Therefore, a bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue is good choice for the free-list + of update descriptors, see cds::memory::vyukov_queue_pool free-list implementation. + Also notice that size of update descriptor is not dependent on the type of data + stored in the tree so single free-list object can be used for several EllenBinTree-based object. + - opt::stat - internal statistics. Available types: ellen_bintree::stat, ellen_bintree::empty_stat (the default) + - opt::copy_policy - key copy policy defines a functor to copy leaf node's key to internal node. + By default, assignment operator is used. + The copy functor interface is: + \code + struct copy_functor { + void operator()( Key& dest, Key const& src ); + }; + \endcode + + @note Do not include header file directly. + There are header file for each GC type: + - - for Hazard Pointer GC cds::gc::HP + - - for Pass-the-Buck GC cds::gc::PTB + - - for RCU GC + (see \ref cds_container_EllenBinTreeMap_rcu "RCU-based EllenBinTreeMap") + */ + template < + class GC, + typename Key, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = ellen_bintree::type_traits +#else + class Traits +#endif + > + class EllenBinTreeMap +#ifdef CDS_DOXYGEN_INVOKED + : public cds::intrusive::EllenBinTree< GC, Key, T, Traits > +#else + : public ellen_bintree::details::make_ellen_bintree_map< GC, Key, T, Traits >::type +#endif + { + //@cond + typedef ellen_bintree::details::make_ellen_bintree_map< GC, Key, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef GC gc ; ///< Garbage collector + typedef Key key_type ; ///< type of a key stored in the map + typedef T mapped_type ; ///< type of value stored in the map + typedef std::pair< key_type const, mapped_type > value_type ; ///< Key-value pair stored in leaf node of the mp + typedef Traits options ; ///< Traits template parameter + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key compare functor based on opt::compare and opt::less option setter. +# else + typedef typename maker::intrusive_type_traits::compare key_comparator; +# endif + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename base_class::node_allocator node_allocator_type ; ///< allocator for maintaining internal node + typedef typename base_class::stat stat ; ///< internal statistics type + typedef typename options::copy_policy copy_policy ; ///< key copy policy + + typedef typename options::allocator allocator_type ; ///< Allocator for leaf nodes + typedef typename base_class::node_allocator node_allocator ; ///< Internal node allocator + typedef typename base_class::update_desc_allocator update_desc_allocator ; ///< Update descriptor allocator + + protected: + //@cond + typedef typename base_class::value_type leaf_node; + typedef typename base_class::internal_node internal_node; + typedef typename base_class::update_desc update_desc; + + typedef typename maker::cxx_leaf_node_allocator cxx_leaf_node_allocator; + + typedef std::unique_ptr< leaf_node, typename maker::leaf_deallocator > scoped_node_ptr; + //@endcond + + public: + /// Guarded pointer + typedef cds::gc::guarded_ptr< gc, leaf_node, value_type, details::guarded_ptr_cast_set > guarded_ptr; + + protected: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_insert_functor + { + void operator()( value_type& ) const + {} + }; + + template + class insert_value_functor + { + Q const& m_val; + public: + insert_value_functor( Q const& v) + : m_val(v) + {} + + void operator()( value_type& item ) + { + item.second = m_val; + } + }; + + template + class insert_key_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + insert_key_wrapper( Func f ): base_class(f) {} + + void operator()( leaf_node& item ) + { + base_class::get()( item.m_Value ); + } + }; + + template + class ensure_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + ensure_wrapper( Func f) : base_class(f) {} + + void operator()( bool bNew, leaf_node& item, leaf_node const& ) + { + base_class::get()( bNew, item.m_Value ); + } + }; + + template + struct erase_functor + { + Func m_func; + + erase_functor( Func f ) + : m_func(f) + {} + + void operator()( leaf_node& node ) + { + cds::unref(m_func)( node.m_Value ); + } + }; + + template + class find_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + find_wrapper( Func f ) + : base_class(f) + {} + + template + void operator()( leaf_node& item, Q& val ) + { + base_class::get()( item.m_Value, val ); + } + }; +# endif + //@endcond + + public: + /// Default constructor + EllenBinTreeMap() + : base_class() + { + //static_assert( (std::is_same::value || std::is_same::value), "GC must be cds::gc::HP or cds:gc::PTB" ); + } + + /// Clears the map + ~EllenBinTreeMap() + {} + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from a value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. 
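+            A usage sketch (illustrative only; \p theMap denotes a hypothetical
+            \p EllenBinTreeMap< cds::gc::HP, int, std::string > instance):
+            \code
+            if ( theMap.insert( 10 ))
+                std::cout << "Key 10 inserted with a default-constructed value\n";
+            \endcode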
+ */ + template + bool insert( K const& key ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return insert_key( key, [](value_type&){} ); +# else + return insert_key( key, empty_insert_functor() ); +# endif + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref value_type should be constructible from \p val of type \p V. + + Returns \p true if \p val is inserted into the map, \p false otherwise. + */ + template + bool insert( K const& key, V const& val ) + { + scoped_node_ptr pNode( cxx_leaf_node_allocator().New( key, val )); + if ( base_class::insert( *pNode )) + { + pNode.release(); + return true; + } + return false; + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + The user-defined functor can be passed by reference using boost::ref + and it is called only if inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + */ + template + bool insert_key( const K& key, Func func ) + { + scoped_node_ptr pNode( cxx_leaf_node_allocator().New( key )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( base_class::insert( *pNode, [&func]( leaf_node& item ) { cds::unref(func)( item.m_Value ); } )) +# else + insert_key_wrapper wrapper(func); + if ( base_class::insert( *pNode, cds::ref(wrapper) )) +#endif + { + pNode.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// For key \p key inserts data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + @note This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( K&& key, Args&&... args ) + { + scoped_node_ptr pNode( cxx_leaf_node_allocator().New( std::forward(key), std::forward(args)... )); + if ( base_class::insert( *pNode )) { + pNode.release(); + return true; + } + return false; + } +# endif + + /// Ensures that the \p key exists in the map + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p key not found in the map, then the new item created from \p key + is inserted into the map (note that in this case the \ref key_type should be + constructible from type \p K). + Otherwise, the functor \p func is called with item found. 
+ The functor \p Func may be a function with signature: + \code + void func( bool bNew, value_type& item ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + + The functor may change any fields of the \p item.second that is \ref value_type. + + You may pass \p func argument by reference using boost::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair ensure( K const& key, Func func ) + { + scoped_node_ptr pNode( cxx_leaf_node_allocator().New( key )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair res = base_class::ensure( *pNode, + [&func](bool bNew, leaf_node& item, leaf_node const& ){ cds::unref(func)( bNew, item.m_Value ); } + ); +# else + ensure_wrapper wrapper( func ); + std::pair res = base_class::ensure( *pNode, cds::ref(wrapper) ); +# endif + if ( res.first && res.second ) + pNode.release(); + return res; + } + + /// Delete \p key from the map + /**\anchor cds_nonintrusive_EllenBinTreeMap_erase_val + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return base_class::erase(key); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred ) + { + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >()); + } + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_EllenBinTreeMap_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& item) { ... } + }; + \endcode + The functor may be passed by reference using boost:ref + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase( key, [&f]( leaf_node& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor wrapper(f); + return base_class::erase( key, cds::ref(wrapper)); +# endif + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. 
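+            A usage sketch (illustrative only; \p map_type / \p theMap denote a hypothetical map type
+            and instance, \p my_less is a hypothetical predicate, and the lambda form assumes
+            CDS_CXX11_LAMBDA_SUPPORT):
+            \code
+            std::string sErased;
+            // save the mapped value of the item while it is being removed
+            bool bOk = theMap.erase_with( 10, my_less(),
+                [&sErased]( map_type::value_type& item ) { sErased = item.second; } );
+            \endcode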
+ */ + template + bool erase_with( K const& key, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >(), + [&f]( leaf_node& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor wrapper(f); + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >(), cds::ref(wrapper)); +# endif + } + + /// Extracts an item with minimal key from the map + /** + If the map is not empty, the function returns \p true, \p result contains a pointer to minimum value. + If the map is empty, the function returns \p false, \p result is left unchanged. + + @note Due the concurrent nature of the map, the function extracts nearly minimum key. + It means that the function gets leftmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of tree traversing. + + The guarded pointer \p result prevents deallocation of returned item, + see cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + bool extract_min( guarded_ptr& result ) + { + return base_class::extract_min_( result.guard() ); + } + + /// Extracts an item with maximal key from the map + /** + If the map is not empty, the function returns \p true, \p result contains a pointer to maximal value. + If the map is empty, the function returns \p false, \p result is left unchanged. + + @note Due the concurrent nature of the map, the function extracts nearly maximal key. + It means that the function gets rightmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key great than leftmost item's key. + So, the function returns the item with maximum key at the moment of tree traversing. + + The guarded pointer \p result prevents deallocation of returned item, + see cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + bool extract_max( guarded_ptr& result ) + { + return base_class::extract_max_( result.guard() ); + } + + /// Extracts an item from the tree + /** \anchor cds_nonintrusive_EllenBinTreeMap_extract + The function searches an item with key equal to \p key in the tree, + unlinks it, and returns pointer to an item found in \p result parameter. + If the item is not found the function returns \p false. + + The guarded pointer \p result prevents deallocation of returned item, + see cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + template + bool extract( guarded_ptr& result, Q const& key ) + { + return base_class::extract_( result.guard(), key ); + } + + /// Extracts an item from the map using \p pred for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_extract "extract(guarded_ptr&, Q const&)" + but \p pred is used for key compare. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the map. 
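+            A usage sketch for the \p extract family (illustrative only; \p theMap is a hypothetical
+            instance and \p guarded_ptr is assumed to expose \p operator-> as cds::gc::guarded_ptr does):
+            \code
+            map_type::guarded_ptr gp;
+            if ( theMap.extract( gp, 10 )) {
+                // the item is already unlinked from the tree; gp only keeps it from being reclaimed
+                std::cout << gp->second << "\n";
+            }   // gp goes out of scope here, allowing the GC to reclaim the item
+            \endcode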
+ */ + template + bool extract_with( guarded_ptr& result, Q const& key, Less pred ) + { + return base_class::extract_with_( result.guard(), key, + cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >()); + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_EllenBinTreeMap_find_cfunc + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + You can pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change \p item.second. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( key, [&f](leaf_node& item, K const& ) { cds::unref(f)( item.m_Value );}); +# else + find_wrapper wrapper(f); + return base_class::find( key, cds::ref(wrapper) ); +# endif + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_find_cfunc "find(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >(), + [&f](leaf_node& item, K const& ) { cds::unref(f)( item.m_Value );}); +# else + find_wrapper wrapper(f); + return base_class::find_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >(), cds::ref(wrapper) ); +# endif + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_EllenBinTreeMap_find_val + + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool find( K const& key ) + { + return base_class::find( key ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_find_val "find(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred ) + { + return base_class::find_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >() ); + } + + /// Finds \p key and returns the item found + /** @anchor cds_nonintrusive_EllenBinTreeMap_get + The function searches the item with key equal to \p key and returns the item found in \p result parameter. + The function returns \p true if \p key is found, \p false otherwise. + + The guarded pointer \p result prevents deallocation of returned item, + see cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. 
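+            A usage sketch (illustrative only; \p theMap is a hypothetical instance). Unlike \p extract,
+            the item stays in the map; the guard only protects it from reclamation while it is used:
+            \code
+            map_type::guarded_ptr gp;
+            if ( theMap.get( gp, 10 ))
+                std::cout << "found: " << gp->second << "\n";   // the item is still in the map
+            \endcode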
+ */ + template + bool get( guarded_ptr& result, Q const& key ) + { + return base_class::get_( result.guard(), key ); + } + + /// Finds \p key with predicate \p pred and returns the item found + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_get "get(guarded_ptr&, Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + bool get_with( guarded_ptr& result, Q const& key, Less pred ) + { + return base_class::get_with_( result.guard(), key, + cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >() ); + } + + /// Clears the map + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Checks internal consistency (not atomic, not thread-safe) + /** + The debugging function to check internal consistency of the tree. + */ + bool check_consistency() const + { + return base_class::check_consistency(); + } + + }; +}} // namespace cds::container + +#endif //#ifndef __CDS_CONTAINER_ELLEN_BINTREE_MAP_IMPL_H diff --git a/cds/container/ellen_bintree_map_ptb.h b/cds/container/ellen_bintree_map_ptb.h new file mode 100644 index 00000000..bb2f77d6 --- /dev/null +++ b/cds/container/ellen_bintree_map_ptb.h @@ -0,0 +1,9 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_ELLEN_BINTREE_MAP_PTB_H +#define __CDS_CONTAINER_ELLEN_BINTREE_MAP_PTB_H + +#include +#include + +#endif // #ifndef __CDS_CONTAINER_ELLEN_BINTREE_MAP_PTB_H diff --git a/cds/container/ellen_bintree_map_rcu.h b/cds/container/ellen_bintree_map_rcu.h new file mode 100644 index 00000000..6634814f --- /dev/null +++ b/cds/container/ellen_bintree_map_rcu.h @@ -0,0 +1,704 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_ELLEN_BINTREE_MAP_RCU_H +#define __CDS_CONTAINER_ELLEN_BINTREE_MAP_RCU_H + +#include +#include +#include + +namespace cds { namespace container { + + /// Map based on Ellen's et al binary search tree (RCU specialization) + /** @ingroup cds_nonintrusive_map + @ingroup cds_nonintrusive_tree + @anchor cds_container_EllenBinTreeMap_rcu + + Source: + - [2010] F.Ellen, P.Fatourou, E.Ruppert, F.van Breugel "Non-blocking Binary Search Tree" + + %EllenBinTreeMap is an unbalanced leaf-oriented binary search tree that implements the map + abstract data type. Nodes maintains child pointers but not parent pointers. + Every internal node has exactly two children, and all data of type std::pair + currently in the tree are stored in the leaves. Internal nodes of the tree are used to direct \p find + operation along the path to the correct leaf. The keys (of \p Key type) stored in internal nodes + may or may not be in the map. + Unlike \ref cds_container_EllenBinTreeSet_rcu "EllenBinTreeSet" keys are not a part of \p T type. + The map can be represented as a set containing std::pair< Key const, T> values. + + Due to \p extract_min and \p extract_max member functions the \p %EllenBinTreeMap can act as + a priority queue. 
In this case you should provide unique compound key, for example, + the priority value plus some uniformly distributed random value. + + @warning Recall the tree is unbalanced. The complexity of operations is O(log N) + for uniformly distributed random keys, but in worst case the complexity is O(N). + + @note In the current implementation we do not use helping technique described in original paper. + So, the current implementation is near to fine-grained lock-based tree. + Helping will be implemented in future release + + Template arguments : + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p Key - key type + - \p T - value type to be stored in tree's leaf nodes. + - \p Traits - type traits. See ellen_bintree::type_traits for explanation. + + It is possible to declare option-based tree with ellen_bintree::make_map_traits metafunction + instead of \p Traits template argument. + Template argument list \p Options of ellen_bintree::make_map_traits metafunction are: + - opt::compare - key compare functor. No default functor is provided. + If the option is not specified, \p %opt::less is used. + - opt::less - specifies binary predicate used for key compare. At least \p %opt::compare or \p %opt::less should be defined. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - opt::allocator - the allocator used for \ref ellen_bintree::map_node "leaf nodes" which contains data. + Default is \ref CDS_DEFAULT_ALLOCATOR. + - opt::node_allocator - the allocator used for \ref ellen_bintree::internal_node "internal nodes". + Default is \ref CDS_DEFAULT_ALLOCATOR. + - ellen_bintree::update_desc_allocator - an allocator of \ref ellen_bintree::update_desc "update descriptors", + default is \ref CDS_DEFAULT_ALLOCATOR. + Note that update descriptor is helping data structure with short lifetime and it is good candidate for pooling. + The number of simultaneously existing descriptors is a relatively small number limited the number of threads + working with the tree and RCU buffer size. + Therefore, a bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue is good choice for the free-list + of update descriptors, see cds::memory::vyukov_queue_pool free-list implementation. + Also notice that size of update descriptor is not dependent on the type of data + stored in the tree so single free-list object can be used for several EllenBinTree-based object. + - opt::stat - internal statistics. Available types: ellen_bintree::stat, ellen_bintree::empty_stat (the default) + - opt::rcu_check_deadlock - a deadlock checking policy. Default is opt::v::rcu_throw_deadlock + - opt::copy_policy - key copy policy defines a functor to copy leaf node's key to internal node. + By default, assignment operator is used. + The copy functor interface is: + \code + struct copy_functor { + void operator()( Key& dest, Key const& src ); + }; + \endcode + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. 
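+            A declaration sketch (illustrative only; the buffered general-purpose RCU type is just one
+            possible choice, and the option list is an assumption):
+            \code
+            typedef cds::urcu::gc< cds::urcu::general_buffered<> >  rcu_type;
+            typedef cds::container::EllenBinTreeMap< rcu_type, int, std::string,
+                cds::container::ellen_bintree::make_map_traits<
+                    cds::opt::less< std::less<int> >
+                >::type
+            > map_type;
+            \endcode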
+ */ + template < + class RCU, + typename Key, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = ellen_bintree::type_traits +#else + class Traits +#endif + > + class EllenBinTreeMap< cds::urcu::gc, Key, T, Traits > +#ifdef CDS_DOXYGEN_INVOKED + : public cds::intrusive::EllenBinTree< cds::urcu::gc, Key, T, Traits > +#else + : public ellen_bintree::details::make_ellen_bintree_map< cds::urcu::gc, Key, T, Traits >::type +#endif + { + //@cond + typedef ellen_bintree::details::make_ellen_bintree_map< cds::urcu::gc, Key, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef cds::urcu::gc gc ; ///< RCU Garbage collector + typedef Key key_type ; ///< type of a key stored in the map + typedef T mapped_type ; ///< type of value stored in the map + typedef std::pair< key_type const, mapped_type > value_type ; ///< Key-value pair stored in leaf node of the mp + typedef Traits options ; ///< Traits template parameter + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key compare functor based on opt::compare and opt::less option setter. +# else + typedef typename maker::intrusive_type_traits::compare key_comparator; +# endif + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + typedef typename base_class::node_allocator node_allocator_type ; ///< allocator for maintaining internal node + typedef typename base_class::stat stat ; ///< internal statistics type + typedef typename base_class::rcu_check_deadlock rcu_check_deadlock ; ///< Deadlock checking policy + typedef typename options::copy_policy copy_policy ; ///< key copy policy + + typedef typename options::allocator allocator_type ; ///< Allocator for leaf nodes + typedef typename base_class::node_allocator node_allocator ; ///< Internal node allocator + typedef typename base_class::update_desc_allocator update_desc_allocator ; ///< Update descriptor allocator + + static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions do not require external locking + + protected: + //@cond + typedef typename base_class::value_type leaf_node; + typedef typename base_class::internal_node internal_node; + typedef typename base_class::update_desc update_desc; + + typedef typename maker::cxx_leaf_node_allocator cxx_leaf_node_allocator; + + typedef std::unique_ptr< leaf_node, typename maker::leaf_deallocator > scoped_node_ptr; + //@endcond + + public: + typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock + + /// pointer to extracted node + typedef cds::urcu::exempt_ptr< gc, leaf_node, value_type, typename maker::intrusive_type_traits::disposer, + cds::urcu::details::conventional_exempt_member_cast + > exempt_ptr; + + protected: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_insert_functor + { + void operator()( value_type& ) const + {} + }; + + template + class insert_value_functor + { + Q const& m_val; + public: + insert_value_functor( Q const& v) + : m_val(v) + {} + + void operator()( value_type& item ) + { + item.second = m_val; + } + }; + + template + class insert_key_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + insert_key_wrapper( Func f ): base_class(f) {} + + void operator()( leaf_node& item ) + { + base_class::get()( item.m_Value ); + } + }; + + template + 
class ensure_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + ensure_wrapper( Func f) : base_class(f) {} + + void operator()( bool bNew, leaf_node& item, leaf_node const& ) + { + base_class::get()( bNew, item.m_Value ); + } + }; + + template + struct erase_functor + { + Func m_func; + + erase_functor( Func f ) + : m_func(f) + {} + + void operator()( leaf_node& node ) + { + cds::unref(m_func)( node.m_Value ); + } + }; + + template + class find_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + find_wrapper( Func f ) + : base_class(f) + {} + + template + void operator()( leaf_node& item, Q& val ) + { + base_class::get()( item.m_Value, val ); + } + }; +# endif + //@endcond + + public: + /// Default constructor + EllenBinTreeMap() + : base_class() + {} + + /// Clears the map + ~EllenBinTreeMap() + {} + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from a value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( K const& key ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return insert_key( key, [](value_type&){} ); +# else + return insert_key( key, empty_insert_functor() ); +# endif + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref value_type should be constructible from \p val of type \p V. + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns \p true if \p val is inserted into the map, \p false otherwise. + */ + template + bool insert( K const& key, V const& val ) + { + scoped_node_ptr pNode( cxx_leaf_node_allocator().New( key, val )); + if ( base_class::insert( *pNode )) + { + pNode.release(); + return true; + } + return false; + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + The user-defined functor can be passed by reference using boost::ref + and it is called only if inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + RCU \p synchronize method can be called. RCU should not be locked. 
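+            A usage sketch (illustrative only; \p theMap is a hypothetical \p map_type instance with
+            \p std::string mapped values, and the lambda form assumes CDS_CXX11_LAMBDA_SUPPORT):
+            \code
+            // the functor runs only if the key was actually inserted
+            theMap.insert_key( 10,
+                []( map_type::value_type& item ) { item.second = "ten"; } );
+            \endcode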
+ */ + template + bool insert_key( const K& key, Func func ) + { + scoped_node_ptr pNode( cxx_leaf_node_allocator().New( key )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( base_class::insert( *pNode, [&func]( leaf_node& item ) { cds::unref(func)( item.m_Value ); } )) +# else + insert_key_wrapper wrapper(func); + if ( base_class::insert( *pNode, cds::ref(wrapper) )) +#endif + { + pNode.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// For key \p key inserts data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + RCU \p synchronize method can be called. RCU should not be locked. + + @note This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( K&& key, Args&&... args ) + { + scoped_node_ptr pNode( cxx_leaf_node_allocator().New( std::forward(key), std::forward(args)... )); + if ( base_class::insert( *pNode )) { + pNode.release(); + return true; + } + return false; + } +# endif + + /// Ensures that the \p key exists in the map + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p key not found in the map, then the new item created from \p key + is inserted into the map (note that in this case the \ref key_type should be + constructible from type \p K). + Otherwise, the functor \p func is called with item found. + The functor \p Func may be a function with signature: + \code + void func( bool bNew, value_type& item ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + + The functor may change any fields of the \p item.second that is \ref value_type. + + You may pass \p func argument by reference using boost::ref. + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair ensure( K const& key, Func func ) + { + scoped_node_ptr pNode( cxx_leaf_node_allocator().New( key )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair res = base_class::ensure( *pNode, + [&func](bool bNew, leaf_node& item, leaf_node const& ){ cds::unref(func)( bNew, item.m_Value ); } + ); +# else + ensure_wrapper wrapper( func ); + std::pair res = base_class::ensure( *pNode, cds::ref(wrapper) ); +# endif + if ( res.first && res.second ) + pNode.release(); + return res; + } + + /// Delete \p key from the map + /**\anchor cds_nonintrusive_EllenBinTreeMap_rcu_erase_val + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return base_class::erase(key); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_rcu_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. 
+ */ + template + bool erase_with( K const& key, Less pred ) + { + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >()); + } + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_EllenBinTreeMap_rcu_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& item) { ... } + }; + \endcode + The functor may be passed by reference using boost:ref + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase( key, [&f]( leaf_node& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor wrapper(f); + return base_class::erase( key, cds::ref(wrapper)); +# endif + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_rcu_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >(), + [&f]( leaf_node& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor wrapper(f); + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >(), cds::ref(wrapper)); +# endif + } + + /// Extracts an item with minimal key from the map + /** + If the map is not empty, the function returns \p true, \p result contains a pointer to value. + If the map is empty, the function returns \p false, \p result is left unchanged. + + @note Due the concurrent nature of the map, the function extracts nearly minimum key. + It means that the function gets leftmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of tree traversing. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item. + The deallocator will be implicitly invoked when \p result object is destroyed or when + result.release() is called, see cds::urcu::exempt_ptr for explanation. + @note Before reusing \p result object you should call its \p release() method. + */ + bool extract_min( exempt_ptr& result ) + { + return base_class::extract_min_( result ); + } + + /// Extracts an item with maximal key from the map + /** + If the map is not empty, the function returns \p true, \p result contains a pointer to extracted item. + If the map is empty, the function returns \p false, \p result is left unchanged. + + @note Due the concurrent nature of the map, the function extracts nearly maximal key. + It means that the function gets rightmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key great than leftmost item's key. 
+ So, the function returns the item with maximum key at the moment of tree traversing. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item. + The deallocator will be implicitly invoked when \p result object is destroyed or when + result.release() is called, see cds::urcu::exempt_ptr for explanation. + @note Before reusing \p result object you should call its \p release() method. + */ + bool extract_max( exempt_ptr& result ) + { + return base_class::extract_max_( result ); + } + + /// Extracts an item from the map + /** \anchor cds_nonintrusive_EllenBinTreeMap_rcu_extract + The function searches an item with key equal to \p key in the tree, + unlinks it, and returns pointer to an item found in \p result parameter. + If \p key is not found the function returns \p false. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not destroy the item found. + The dealloctor will be implicitly invoked when \p result object is destroyed or when + result.release() is called, see cds::urcu::exempt_ptr for explanation. + @note Before reusing \p result object you should call its \p release() method. + */ + template + bool extract( exempt_ptr& result, Q const& key ) + { + return base_class::extract_( result, key, typename base_class::node_compare()); + } + + /// Extracts an item from the map using \p pred for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_rcu_extract "extract(exempt_ptr&, Q const&)" + but \p pred is used for key compare. + \p Less has the interface like \p std::less and should meet \ref cds_container_EllenBinTreeSet_rcu_less + "predicate requirements". + \p pred must imply the same element order as the comparator used for building the map. + */ + template + bool extract_with( exempt_ptr& result, Q const& val, Less pred ) + { + return base_class::extract_with_( result, val, + cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >() ); + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_EllenBinTreeMap_rcu_find_cfunc + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + You can pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change \p item.second. + + The function applies RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( key, [&f](leaf_node& item, K const& ) { cds::unref(f)( item.m_Value );}); +# else + find_wrapper wrapper(f); + return base_class::find( key, cds::ref(wrapper) ); +# endif + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_rcu_find_cfunc "find(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. 
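+            A usage sketch (illustrative only; \p theMap is a hypothetical instance, \p my_less is a
+            hypothetical predicate, and the lambda form assumes CDS_CXX11_LAMBDA_SUPPORT):
+            \code
+            bool bFound = theMap.find_with( 10, my_less(),
+                []( map_type::value_type& item ) { std::cout << item.second << "\n"; } );
+            \endcode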
+ */ + template + bool find_with( K const& key, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >(), + [&f](leaf_node& item, K const& ) { cds::unref(f)( item.m_Value );}); +# else + find_wrapper wrapper(f); + return base_class::find_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >(), cds::ref(wrapper) ); +# endif + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_EllenBinTreeMap_rcu_find_val + + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool find( K const& key ) + { + return base_class::find( key ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_rcu_find_val "find(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred ) + { + return base_class::find_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >() ); + } + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_EllenBinTreeMap_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns \p NULL. + + RCU should be locked before call the function. + Returned pointer is valid while RCU is locked. + */ + template + value_type * get( Q const& key ) const + { + leaf_node * pNode = base_class::get( key ); + return pNode ? &pNode->m_Value : null_ptr(); + } + + /// Finds \p key with \p pred predicate and return the item found + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type + and \p Q in any order. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + value_type * get_with( Q const& key, Less pred ) const + { + leaf_node * pNode = base_class::get_with( key, + cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >()); + return pNode ? &pNode->m_Value : null_ptr(); + } + + /// Clears the map + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Checks internal consistency (not atomic, not thread-safe) + /** + The debugging function to check internal consistency of the tree. 
+ */ + bool check_consistency() const + { + return base_class::check_consistency(); + } + + }; +}} // namespace cds::container + +#endif //#ifndef __CDS_CONTAINER_ELLEN_BINTREE_MAP_RCU_H diff --git a/cds/container/ellen_bintree_set_hp.h b/cds/container/ellen_bintree_set_hp.h new file mode 100644 index 00000000..34a60709 --- /dev/null +++ b/cds/container/ellen_bintree_set_hp.h @@ -0,0 +1,9 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_ELLEN_BINTREE_SET_HP_H +#define __CDS_CONTAINER_ELLEN_BINTREE_SET_HP_H + +#include +#include + +#endif // #ifndef __CDS_CONTAINER_ELLEN_BINTREE_SET_HP_H diff --git a/cds/container/ellen_bintree_set_impl.h b/cds/container/ellen_bintree_set_impl.h new file mode 100644 index 00000000..a043156a --- /dev/null +++ b/cds/container/ellen_bintree_set_impl.h @@ -0,0 +1,753 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_ELLEN_BINTREE_SET_IMPL_H +#define __CDS_CONTAINER_ELLEN_BINTREE_SET_IMPL_H + +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Set based on Ellen's et al binary search tree + /** @ingroup cds_nonintrusive_set + @ingroup cds_nonintrusive_tree + @anchor cds_container_EllenBinTreeSet + + Source: + - [2010] F.Ellen, P.Fatourou, E.Ruppert, F.van Breugel "Non-blocking Binary Search Tree" + + %EllenBinTreeSet is an unbalanced leaf-oriented binary search tree that implements the set + abstract data type. Nodes maintains child pointers but not parent pointers. + Every internal node has exactly two children, and all data of type \p T currently in + the tree are stored in the leaves. Internal nodes of the tree are used to direct \p find + operation along the path to the correct leaf. The keys (of \p Key type) stored in internal nodes + may or may not be in the set. \p Key type is a subset of \p T type. + There should be exactly defined a key extracting functor for converting object of type \p T to + object of type \p Key. + + Due to \p extract_min and \p extract_max member functions the \p %EllenBinTreeSet can act as + a priority queue. In this case you should provide unique compound key, for example, + the priority value plus some uniformly distributed random value. + + @warning Recall the tree is unbalanced. The complexity of operations is O(log N) + for uniformly distributed random keys, but in worst case the complexity is O(N). + + @note In the current implementation we do not use helping technique described in original paper. + So, the current implementation is near to fine-grained lock-based tree. + Helping will be implemented in future release + + Template arguments : + - \p GC - safe memory reclamation (i.e. light-weight garbage collector) type, like cds::gc::HP, cds::gc::PTB + Note that cds::gc::HRC is not supported. + - \p Key - key type, a subset of \p T + - \p T - type to be stored in tree's leaf nodes. + - \p Traits - type traits. See ellen_bintree::type_traits for explanation. + + It is possible to declare option-based tree with ellen_bintree::make_set_traits metafunction + instead of \p Traits template argument. + Template argument list \p Options of ellen_bintree::make_set_traits metafunction are: + - ellen_bintree::key_extractor - key extracting functor, mandatory option. The functor has the following prototype: + \code + struct key_extractor { + void operator ()( Key& dest, T const& src ); + }; + \endcode + It should initialize \p dest key from \p src data. The functor is used to initialize internal nodes. + - opt::compare - key compare functor. No default functor is provided. 
+ If the option is not specified, \p %opt::less is used. + - opt::less - specifies binary predicate used for key compare. At least \p %opt::compare or \p %opt::less should be defined. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - opt::allocator - the allocator used for \ref ellen_bintree::node "leaf nodes" which contains data. + Default is \ref CDS_DEFAULT_ALLOCATOR. + - opt::node_allocator - the allocator used for internal nodes. Default is \ref CDS_DEFAULT_ALLOCATOR. + - ellen_bintree::update_desc_allocator - an allocator of \ref ellen_bintree::update_desc "update descriptors", + default is \ref CDS_DEFAULT_ALLOCATOR. + Note that update descriptor is helping data structure with short lifetime and it is good candidate for pooling. + The number of simultaneously existing descriptors is a relatively small number limited the number of threads + working with the tree and GC buffer size. + Therefore, a bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue is good choice for the free-list + of update descriptors, see cds::memory::vyukov_queue_pool free-list implementation. + Also notice that size of update descriptor is not dependent on the type of data + stored in the tree so single free-list object can be used for several EllenBinTree-based object. + - opt::stat - internal statistics. Available types: ellen_bintree::stat, ellen_bintree::empty_stat (the default) + + @note Do not include header file directly. + There are header file for each GC type: + - - for Hazard Pointer GC cds::gc::HP + - - for Pass-the-Buck GC cds::gc::PTB + - - for RCU GC + (see \ref cds_container_EllenBinTreeSet_rcu "RCU-based EllenBinTreeSet") + + @anchor cds_container_EllenBinTreeSet_less + Predicate requirements + + opt::less, opt::compare and other predicates using with member fuctions should accept at least parameters + of type \p T and \p Key in any combination. + For example, for \p Foo struct with \p std::string key field the appropiate \p less functor is: + \code + struct Foo + { + std::string m_strKey; + ... 
+ }; + + struct less { + bool operator()( Foo const& v1, Foo const& v2 ) const + { return v1.m_strKey < v2.m_strKey ; } + + bool operator()( Foo const& v, std::string const& s ) const + { return v.m_strKey < s ; } + + bool operator()( std::string const& s, Foo const& v ) const + { return s < v.m_strKey ; } + + // Support comparing std::string and char const * + bool operator()( std::string const& s, char const * p ) const + { return s.compare(p) < 0 ; } + + bool operator()( Foo const& v, char const * p ) const + { return v.m_strKey.compare(p) < 0 ; } + + bool operator()( char const * p, std::string const& s ) const + { return s.compare(p) > 0; } + + bool operator()( char const * p, Foo const& v ) const + { return v.m_strKey.compare(p) > 0; } + }; + \endcode + */ + template < + class GC, + typename Key, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = ellen_bintree::type_traits +#else + class Traits +#endif + > + class EllenBinTreeSet +#ifdef CDS_DOXYGEN_INVOKED + : public cds::intrusive::EllenBinTree< GC, Key, T, Traits > +#else + : public ellen_bintree::details::make_ellen_bintree_set< GC, Key, T, Traits >::type +#endif + { + //@cond + typedef ellen_bintree::details::make_ellen_bintree_set< GC, Key, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef GC gc ; ///< Garbage collector + typedef Key key_type ; ///< type of a key stored in internal nodes; key is a part of \p value_type + typedef T value_type ; ///< type of value stored in the binary tree + typedef Traits options ; ///< Traits template parameter + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key compare functor based on opt::compare and opt::less option setter. +# else + typedef typename maker::intrusive_type_traits::compare key_comparator; +# endif + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename base_class::stat stat ; ///< internal statistics type + typedef typename options::key_extractor key_extractor ; ///< key extracting functor + + typedef typename options::allocator allocator_type ; ///< Allocator for leaf nodes + typedef typename base_class::node_allocator node_allocator ; ///< Internal node allocator + typedef typename base_class::update_desc_allocator update_desc_allocator ; ///< Update descriptor allocator + + protected: + //@cond + typedef typename maker::cxx_leaf_node_allocator cxx_leaf_node_allocator; + typedef typename base_class::value_type leaf_node; + typedef typename base_class::internal_node internal_node; + + typedef std::unique_ptr< leaf_node, typename maker::leaf_deallocator > scoped_node_ptr; + //@endcond + + public: + /// Guarded pointer + typedef cds::gc::guarded_ptr< gc, leaf_node, value_type, details::guarded_ptr_cast_set > guarded_ptr; + + protected: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + template + struct insert_functor + { + Func m_func; + + insert_functor ( Func f ) + : m_func(f) + {} + + void operator()( leaf_node& node ) + { + cds::unref(m_func)( node.m_Value ); + } + }; + + template + struct ensure_functor + { + Func m_func; + Q const& m_arg; + + ensure_functor( Q const& arg, Func f ) + : m_func(f) + , m_arg( arg ) + {} + + void operator ()( bool bNew, leaf_node& node, leaf_node& ) + { + cds::unref(m_func)( bNew, node.m_Value, m_arg ); + } + }; + + template + struct erase_functor + { + Func m_func; + + erase_functor( Func f ) + : m_func(f) + {} + + void operator()( leaf_node const& node ) + { + cds::unref(m_func)( node.m_Value ); + } + }; + + template + struct find_functor + { + Func m_func; + + find_functor( Func f ) + : m_func(f) + {} + + template + void operator ()( leaf_node& node, Q& val ) + { + cds::unref(m_func)( node.m_Value, val ); + } + }; +#endif + //@endcond + + public: + /// Default constructor + EllenBinTreeSet() + : base_class() + { + //static_assert( (std::is_same::value || std::is_same::value), "GC must be cds::gc::HP or cds:gc::PTB" ); + } + + /// Clears the set + ~EllenBinTreeSet() + {} + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain at least the complete key for the node. + The object of \ref value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( Q const& val ) + { + scoped_node_ptr sp( cxx_leaf_node_allocator().New( val )); + if ( base_class::insert( *sp.get() )) { + sp.release(); + return true; + } + return false; + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-fields of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success. 
It may be passed by reference + using boost::ref + */ + template + bool insert( Q const& val, Func f ) + { + scoped_node_ptr sp( cxx_leaf_node_allocator().New( val )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( base_class::insert( *sp.get(), [&f]( leaf_node& val ) { cds::unref(f)( val.m_Value ); } )) +# else + insert_functor wrapper(f); + if ( base_class::insert( *sp, cds::ref(wrapper) )) +# endif + { + sp.release(); + return true; + } + return false; + } + + /// Ensures that the item exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p val key not found in the set, then the new item created from \p val + is inserted into the set. Otherwise, the functor \p func is called with the item found. + The functor \p Func should be a function with signature: + \code + void func( bool bNew, value_type& item, const Q& val ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p key passed into the \p ensure function + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the set. + */ + template + std::pair ensure( const Q& val, Func func ) + { + scoped_node_ptr sp( cxx_leaf_node_allocator().New( val )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair bRes = base_class::ensure( *sp, + [&func, &val](bool bNew, leaf_node& node, leaf_node&){ cds::unref(func)( bNew, node.m_Value, val ); }); +# else + ensure_functor wrapper( val, func ); + std::pair bRes = base_class::ensure( *sp, cds::ref(wrapper)); +# endif + if ( bRes.first && bRes.second ) + sp.release(); + return bRes; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + @note This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr sp( cxx_leaf_node_allocator().New( std::forward(args)... )); + if ( base_class::insert( *sp.get() )) { + sp.release(); + return true; + } + return false; + } +# endif + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_EllenBinTreeSet_erase_val + + The item comparator should be able to compare the type \p value_type + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
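+            A hedged sketch of a \p Less predicate used with this function (the \p set_type typedef,
+            the \p Foo type and the string key follow the class-level example above and are
+            illustrative assumptions only):
+            \code
+            struct less_by_key {
+                bool operator()( Foo const& v, std::string const& s ) const
+                { return v.m_strKey < s; }
+                bool operator()( std::string const& s, Foo const& v ) const
+                { return s < v.m_strKey; }
+                bool operator()( Foo const& lhs, Foo const& rhs ) const
+                { return lhs.m_strKey < rhs.m_strKey; }
+            };
+
+            set_type s;
+            // ...
+            bool bErased = s.erase_with( std::string("some key"), less_by_key() );
+            \endcode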
+ */ + template + bool erase_with( Q const& key, Less pred ) + { + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); + } + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_EllenBinTreeSet_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type const& val); + }; + \endcode + The functor may be passed by reference using boost:ref + + Since the key of MichaelHashSet's \p value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase( key, [&f]( leaf_node const& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor wrapper(f); + return base_class::erase( key, cds::ref(wrapper)); +# endif + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), + [&f]( leaf_node const& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor wrapper(f); + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), cds::ref(wrapper)); +# endif + } + + /// Extracts an item with minimal key from the set + /** + If the set is not empty, the function returns \p true, \p result contains a pointer to minimum value. + If the set is empty, the function returns \p false, \p result is left unchanged. + + @note Due the concurrent nature of the set, the function extracts nearly minimum key. + It means that the function gets leftmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of tree traversing. + + The guarded pointer \p dest prevents deallocation of returned item, + see cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + bool extract_min( guarded_ptr& result ) + { + return base_class::extract_min_( result.guard() ); + } + + /// Extracts an item with maximal key from the set + /** + If the set is not empty, the function returns \p true, \p result contains a pointer to maximal value. + If the set is empty, the function returns \p false, \p result is left unchanged. + + @note Due the concurrent nature of the set, the function extracts nearly maximal key. + It means that the function gets rightmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key great than leftmost item's key. 
+ So, the function returns the item with maximum key at the moment of tree traversing. + + The guarded pointer \p dest prevents deallocation of returned item, + see cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + bool extract_max( guarded_ptr& result ) + { + return base_class::extract_max_( result.guard() ); + } + + /// Extracts an item from the tree + /** \anchor cds_nonintrusive_EllenBinTreeSet_extract + The function searches an item with key equal to \p key in the tree, + unlinks it, and returns pointer to an item found in \p result parameter. + If the item is not found the function returns \p false. + + The guarded pointer \p dest prevents deallocation of returned item, + see cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + template + bool extract( guarded_ptr& result, Q const& key ) + { + return base_class::extract_( result.guard(), key ); + } + + /// Extracts an item from the set using \p pred for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_extract "extract(guarded_ptr& dest, Q const&)" + but \p pred is used for key compare. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool extract_with( guarded_ptr& result, Q const& key, Less pred ) + { + return base_class::extract_with_( result.guard(), key, + cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); + } + + /// Find the key \p val + /** + @anchor cds_nonintrusive_EllenBinTreeSet_find_func + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( val, [&f]( leaf_node& node, Q& v ) { cds::unref(f)( node.m_Value, v ); }); +# else + find_functor wrapper(f); + return base_class::find( val, cds::ref(wrapper)); +# endif + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
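+            A hedged usage sketch, assuming a compiler with lambda support and reusing the
+            \p less_by_key predicate sketched for \p erase_with above (the \p set_type typedef and
+            the \p m_nAccessCount field are illustrative assumptions):
+            \code
+            set_type s;
+            // ...
+            std::string key( "some key" );
+            bool bFound = s.find_with( key, less_by_key(),
+                []( set_type::value_type& item, std::string& /*key*/ ) {
+                    ++item.m_nAccessCount;   // update a non-key field of the found item
+                });
+            \endcode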
+ */ + template + bool find_with( Q& val, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( val, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), + [&f]( leaf_node& node, Q& v ) { cds::unref(f)( node.m_Value, v ); } ); +# else + find_functor wrapper(f); + return base_class::find_with( val, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), + cds::ref(wrapper)); +# endif + } + + /// Find the key \p val + /** @anchor cds_nonintrusive_EllenBinTreeSet_find_cfunc + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( val, [&f]( leaf_node& node, Q const& v ) { cds::unref(f)( node.m_Value, v ); }); +# else + find_functor wrapper(f); + return base_class::find( val, cds::ref(wrapper)); +# endif + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( val, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), + [&f]( leaf_node& node, Q const& v ) { cds::unref(f)( node.m_Value, v ); } ); +# else + find_functor wrapper(f); + return base_class::find_with( val, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), + cds::ref(wrapper)); +# endif + } + + /// Find the key \p val + /** @anchor cds_nonintrusive_EllenBinTreeSet_find_val + + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \ref value_type. + */ + template + bool find( Q const & val ) + { + return base_class::find( val ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + bool find_with( Q const& val, Less pred ) + { + return base_class::find_with( val, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); + } + + /// Finds \p key and returns the item found + /** @anchor cds_nonintrusive_EllenBinTreeSet_get + The function searches the item with key equal to \p key and returns the item found in \p result parameter. + The function returns \p true if \p key is found, \p false otherwise. + + The guarded pointer \p dest prevents deallocation of returned item, + see cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + template + bool get( guarded_ptr& result, Q const& key ) + { + return base_class::get_( result.guard(), key ); + } + + /// Finds \p key with predicate \p pred and returns the item found + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_get "get(guarded_ptr&, Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool get_with( guarded_ptr& result, Q const& key, Less pred ) + { + return base_class::get_with_( result.guard(), key, + cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >() ); + } + + /// Clears the set (non-atomic) + /** + The function unlink all items from the tree. + The function is not atomic, thus, in multi-threaded environment with parallel insertions + this sequence + \code + set.clear(); + assert( set.empty() ); + \endcode + the assertion could be raised. + + For each leaf the \ref disposer will be called after unlinking. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the set is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + /** + Only leaf nodes containing user data are counted. + + The value returned depends on item counter type provided by \p Traits template parameter. + If it is atomicity::empty_item_counter this function always returns 0. + Therefore, the function is not suitable for checking the tree emptiness, use \ref empty + member function for this purpose. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Checks internal consistency (not atomic, not thread-safe) + /** + The debugging function to check internal consistency of the tree. 
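+            A hedged sketch of using the set as a priority queue via \p extract_min and \p guarded_ptr,
+            as described in the class-level documentation above (the \p set_type typedef and the
+            \p process() helper are assumptions; dereference and \p release() are assumed to be
+            provided by cds::gc::guarded_ptr):
+            \code
+            set_type s;
+            // ... fill the set ...
+            set_type::guarded_ptr gp;
+            while ( s.extract_min( gp )) {
+                process( *gp );   // gp guards the extracted item against reclamation
+                gp.release();     // return the HP guard before reusing gp
+            }
+            \endcode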
+ */ + bool check_consistency() const + { + return base_class::check_consistency(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_ELLEN_BINTREE_SET_IMPL_H diff --git a/cds/container/ellen_bintree_set_ptb.h b/cds/container/ellen_bintree_set_ptb.h new file mode 100644 index 00000000..4f1f2176 --- /dev/null +++ b/cds/container/ellen_bintree_set_ptb.h @@ -0,0 +1,9 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_ELLEN_BINTREE_SET_PTB_H +#define __CDS_CONTAINER_ELLEN_BINTREE_SET_PTB_H + +#include +#include + +#endif // #ifndef __CDS_CONTAINER_ELLEN_BINTREE_SET_PTB_H diff --git a/cds/container/ellen_bintree_set_rcu.h b/cds/container/ellen_bintree_set_rcu.h new file mode 100644 index 00000000..7ca083e1 --- /dev/null +++ b/cds/container/ellen_bintree_set_rcu.h @@ -0,0 +1,785 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_ELLEN_BINTREE_SET_RCU_H +#define __CDS_CONTAINER_ELLEN_BINTREE_SET_RCU_H + +#include +#include + +namespace cds { namespace container { + + /// Set based on Ellen's et al binary search tree (RCU specialization) + /** @ingroup cds_nonintrusive_set + @ingroup cds_nonintrusive_tree + @anchor cds_container_EllenBinTreeSet_rcu + + Source: + - [2010] F.Ellen, P.Fatourou, E.Ruppert, F.van Breugel "Non-blocking Binary Search Tree" + + %EllenBinTreeSet is an unbalanced leaf-oriented binary search tree that implements the set + abstract data type. Nodes maintains child pointers but not parent pointers. + Every internal node has exactly two children, and all data of type \p T currently in + the tree are stored in the leaves. Internal nodes of the tree are used to direct \p find + operation along the path to the correct leaf. The keys (of \p Key type) stored in internal nodes + may or may not be in the set. \p Key type is a subset of \p T type. + There should be exactly defined a key extracting functor for converting object of type \p T to + object of type \p Key. + + Due to \p extract_min and \p extract_max member functions the \p %EllenBinTreeSet can act as + a priority queue. In this case you should provide unique compound key, for example, + the priority value plus some uniformly distributed random value. + + @warning Recall the tree is unbalanced. The complexity of operations is O(log N) + for uniformly distributed random keys, but in worst case the complexity is O(N). + + @note In the current implementation we do not use helping technique described in original paper. + So, the current implementation is near to fine-grained lock-based tree. + Helping will be implemented in future release + + Template arguments : + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p Key - key type, a subset of \p T + - \p T - type to be stored in tree's leaf nodes. + - \p Traits - type traits. See ellen_bintree::type_traits for explanation. + + It is possible to declare option-based tree with ellen_bintree::make_set_traits metafunction + instead of \p Traits template argument. + Template argument list \p Options of ellen_bintree::make_set_traits metafunction are: + - ellen_bintree::key_extractor - key extracting functor, mandatory option. The functor has the following prototype: + \code + struct key_extractor { + void operator ()( Key& dest, T const& src ); + }; + \endcode + It should initialize \p dest key from \p src data. The functor is used to initialize internal nodes. + - opt::compare - key compare functor. No default functor is provided. + If the option is not specified, \p %opt::less is used. + - opt::less - specifies binary predicate used for key compare. 
At least \p %opt::compare or \p %opt::less should be defined. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - opt::allocator - the allocator used for \ref ellen_bintree::node "leaf nodes" which contains data. + Default is \ref CDS_DEFAULT_ALLOCATOR. + - opt::node_allocator - the allocator used for internal nodes. Default is \ref CDS_DEFAULT_ALLOCATOR. + - ellen_bintree::update_desc_allocator - an allocator of \ref ellen_bintree::update_desc "update descriptors", + default is \ref CDS_DEFAULT_ALLOCATOR. + Note that update descriptor is helping data structure with short lifetime and it is good candidate for pooling. + The number of simultaneously existing descriptors is a relatively small number limited the number of threads + working with the tree and RCU buffer size. + Therefore, a bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue is good choice for the free-list + of update descriptors, see cds::memory::vyukov_queue_pool free-list implementation. + Also notice that size of update descriptor is not dependent on the type of data + stored in the tree so single free-list object can be used for several EllenBinTree-based object. + - opt::stat - internal statistics. Available types: ellen_bintree::stat, ellen_bintree::empty_stat (the default) + - opt::rcu_check_deadlock - a deadlock checking policy. Default is opt::v::rcu_throw_deadlock + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + + @anchor cds_container_EllenBinTreeSet_rcu_less + Predicate requirements + + opt::less, opt::compare and other predicates using with member fuctions should accept at least parameters + of type \p T and \p Key in any combination. + For example, for \p Foo struct with \p std::string key field the appropiate \p less functor is: + \code + struct Foo + { + std::string m_strKey; + ... 
+ }; + + struct less { + bool operator()( Foo const& v1, Foo const& v2 ) const + { return v1.m_strKey < v2.m_strKey ; } + + bool operator()( Foo const& v, std::string const& s ) const + { return v.m_strKey < s ; } + + bool operator()( std::string const& s, Foo const& v ) const + { return s < v.m_strKey ; } + + // Support comparing std::string and char const * + bool operator()( std::string const& s, char const * p ) const + { return s.compare(p) < 0 ; } + + bool operator()( Foo const& v, char const * p ) const + { return v.m_strKey.compare(p) < 0 ; } + + bool operator()( char const * p, std::string const& s ) const + { return s.compare(p) > 0; } + + bool operator()( char const * p, Foo const& v ) const + { return v.m_strKey.compare(p) > 0; } + }; + \endcode + + */ + template < + class RCU, + typename Key, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = ellen_bintree::type_traits +#else + class Traits +#endif + > + class EllenBinTreeSet< cds::urcu::gc, Key, T, Traits > +#ifdef CDS_DOXYGEN_INVOKED + : public cds::intrusive::EllenBinTree< cds::urcu::gc, Key, T, Traits > +#else + : public ellen_bintree::details::make_ellen_bintree_set< cds::urcu::gc, Key, T, Traits >::type +#endif + { + //@cond + typedef ellen_bintree::details::make_ellen_bintree_set< cds::urcu::gc, Key, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef cds::urcu::gc gc ; ///< RCU Garbage collector + typedef Key key_type ; ///< type of a key stored in internal nodes; key is a part of \p value_type + typedef T value_type ; ///< type of value stored in the binary tree + typedef Traits options ; ///< Traits template parameter + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key compare functor based on opt::compare and opt::less option setter. +# else + typedef typename maker::intrusive_type_traits::compare key_comparator; +# endif + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename base_class::stat stat ; ///< internal statistics type + typedef typename base_class::rcu_check_deadlock rcu_check_deadlock ; ///< Deadlock checking policy + typedef typename options::key_extractor key_extractor ; ///< key extracting functor + + typedef typename options::allocator allocator_type ; ///< Allocator for leaf nodes + typedef typename base_class::node_allocator node_allocator ; ///< Internal node allocator + typedef typename base_class::update_desc_allocator update_desc_allocator ; ///< Update descriptor allocator + + static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions do not require external locking + + protected: + //@cond + typedef typename maker::cxx_leaf_node_allocator cxx_leaf_node_allocator; + typedef typename base_class::value_type leaf_node; + typedef typename base_class::internal_node internal_node; + typedef std::unique_ptr< leaf_node, typename maker::intrusive_type_traits::disposer > scoped_node_ptr; + //@endcond + + public: + typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock + + /// pointer to extracted node + typedef cds::urcu::exempt_ptr< gc, leaf_node, value_type, typename maker::intrusive_type_traits::disposer, + cds::urcu::details::conventional_exempt_member_cast + > exempt_ptr; + + protected: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + template + struct insert_functor + { + Func m_func; + + insert_functor ( Func f ) + : m_func(f) + {} + + void operator()( leaf_node& node ) + { + cds::unref(m_func)( node.m_Value ); + } + }; + + template + struct ensure_functor + { + Func m_func; + Q const& m_arg; + + ensure_functor( Q const& arg, Func f ) + : m_func(f) + , m_arg( arg ) + {} + + void operator ()( bool bNew, leaf_node& node, leaf_node& ) + { + cds::unref(m_func)( bNew, node.m_Value, m_arg ); + } + }; + + template + struct erase_functor + { + Func m_func; + + erase_functor( Func f ) + : m_func(f) + {} + + void operator()( leaf_node const& node ) + { + cds::unref(m_func)( node.m_Value ); + } + }; + + template + struct find_functor + { + Func m_func; + + find_functor( Func f ) + : m_func(f) + {} + + template + void operator ()( leaf_node& node, Q& val ) + { + cds::unref(m_func)( node.m_Value, val ); + } + }; +#endif + //@endcond + + public: + /// Default constructor + EllenBinTreeSet() + : base_class() + {} + + /// Clears the set + ~EllenBinTreeSet() + {} + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain at least the complete key for the node. + The object of \ref value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( Q const& val ) + { + scoped_node_ptr sp( cxx_leaf_node_allocator().New( val )); + if ( base_class::insert( *sp.get() )) { + sp.release(); + return true; + } + return false; + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-fields of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. 
User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success. It may be passed by reference + using boost::ref + + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool insert( Q const& val, Func f ) + { + scoped_node_ptr sp( cxx_leaf_node_allocator().New( val )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( base_class::insert( *sp.get(), [&f]( leaf_node& val ) { cds::unref(f)( val.m_Value ); } )) +# else + insert_functor wrapper(f); + if ( base_class::insert( *sp, cds::ref(wrapper) )) +# endif + { + sp.release(); + return true; + } + return false; + } + + /// Ensures that the item exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p val key not found in the set, then the new item created from \p val + is inserted into the set. Otherwise, the functor \p func is called with the item found. + The functor \p Func should be a function with signature: + \code + void func( bool bNew, value_type& item, const Q& val ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p key passed into the \p ensure function + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the set. + */ + template + std::pair ensure( const Q& val, Func func ) + { + scoped_node_ptr sp( cxx_leaf_node_allocator().New( val )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair bRes = base_class::ensure( *sp, + [&func, &val](bool bNew, leaf_node& node, leaf_node&){ cds::unref(func)( bNew, node.m_Value, val ); }); +# else + ensure_functor wrapper( val, func ); + std::pair bRes = base_class::ensure( *sp, cds::ref(wrapper)); +# endif + if ( bRes.first && bRes.second ) + sp.release(); + return bRes; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + RCU \p synchronize method can be called. RCU should not be locked. + + @note This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr sp( cxx_leaf_node_allocator().New( std::forward(args)... )); + if ( base_class::insert( *sp.get() )) { + sp.release(); + return true; + } + return false; + } +# endif + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_EllenBinTreeSet_rcu_erase_val + + The item comparator should be able to compare the type \p value_type + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. 
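+            A minimal sketch (the \p set_type typedef is an assumption); note that the call is made
+            outside of any RCU read-side lock because \p erase() may call \p synchronize():
+            \code
+            set_type s;
+            // ...
+            bool bDeleted = s.erase( 42 );   // must not be called under an RCU lock
+            \endcode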
+ + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_rcu_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred ) + { + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); + } + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_EllenBinTreeSet_rcu_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type const& val); + }; + \endcode + The functor may be passed by reference using boost:ref + + Since the key of MichaelHashSet's \p value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + + See also: \ref erase + */ + template + bool erase( Q const& key, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase( key, [&f]( leaf_node const& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor wrapper(f); + return base_class::erase( key, cds::ref(wrapper)); +# endif + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_rcu_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), + [&f]( leaf_node const& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor wrapper(f); + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), cds::ref(wrapper)); +# endif + } + + /// Extracts an item with minimal key from the set + /** + If the set is not empty, the function returns \p true, \p result contains a pointer to value. + If the set is empty, the function returns \p false, \p result is left unchanged. + + @note Due the concurrent nature of the set, the function extracts nearly minimum key. + It means that the function gets leftmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of tree traversing. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item. 
+ The deallocator will be implicitly invoked when \p result object is destroyed or when + result.release() is called, see cds::urcu::exempt_ptr for explanation. + @note Before reusing \p result object you should call its \p release() method. + */ + bool extract_min( exempt_ptr& result ) + { + return base_class::extract_min_( result ); + } + + /// Extracts an item with maximal key from the set + /** + If the set is not empty, the function returns \p true, \p result contains a pointer to extracted item. + If the set is empty, the function returns \p false, \p result is left unchanged. + + @note Due the concurrent nature of the set, the function extracts nearly maximal key. + It means that the function gets rightmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key great than leftmost item's key. + So, the function returns the item with maximum key at the moment of tree traversing. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item. + The deallocator will be implicitly invoked when \p result object is destroyed or when + result.release() is called, see cds::urcu::exempt_ptr for explanation. + @note Before reusing \p result object you should call its \p release() method. + */ + bool extract_max( exempt_ptr& result ) + { + return base_class::extract_max_( result ); + } + + /// Extracts an item from the set + /** \anchor cds_nonintrusive_EllenBinTreeSet_rcu_extract + The function searches an item with key equal to \p key in the tree, + unlinks it, and returns pointer to an item found in \p result parameter. + If \p key is not found the function returns \p false. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not destroy the item found. + The dealloctor will be implicitly invoked when \p result object is destroyed or when + result.release() is called, see cds::urcu::exempt_ptr for explanation. + @note Before reusing \p result object you should call its \p release() method. + */ + template + bool extract( exempt_ptr& result, Q const& key ) + { + return base_class::extract_( result, key, typename base_class::node_compare()); + } + + /// Extracts an item from the set using \p pred for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_rcu_extract "extract(exempt_ptr&, Q const&)" + but \p pred is used for key compare. + \p Less has the interface like \p std::less and should meet \ref cds_container_EllenBinTreeSet_rcu_less + "predicate requirements". + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool extract_with( exempt_ptr& result, Q const& val, Less pred ) + { + return base_class::extract_with_( result, val, + cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >() ); + } + + /// Find the key \p val + /** + @anchor cds_nonintrusive_EllenBinTreeSet_rcu_find_func + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. 
+ The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \p value_type. + + The function applies RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) const + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( val, [&f]( leaf_node& node, Q& v ) { cds::unref(f)( node.m_Value, v ); }); +# else + find_functor wrapper(f); + return base_class::find( val, cds::ref(wrapper)); +# endif + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_rcu_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Less pred, Func f ) const + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( val, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), + [&f]( leaf_node& node, Q& v ) { cds::unref(f)( node.m_Value, v ); } ); +# else + find_functor wrapper(f); + return base_class::find_with( val, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), + cds::ref(wrapper)); +# endif + } + + /// Find the key \p val + /** @anchor cds_nonintrusive_EllenBinTreeSet_rcu_find_cfunc + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \p value_type. + + The function applies RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) const + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( val, [&f]( leaf_node& node, Q const& v ) { cds::unref(f)( node.m_Value, v ); }); +# else + find_functor wrapper(f); + return base_class::find( val, cds::ref(wrapper)); +# endif + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_rcu_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + bool find_with( Q const& val, Less pred, Func f ) const + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( val, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), + [&f]( leaf_node& node, Q const& v ) { cds::unref(f)( node.m_Value, v ); } ); +# else + find_functor wrapper(f); + return base_class::find_with( val, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), + cds::ref(wrapper)); +# endif + } + + /// Find the key \p val + /** @anchor cds_nonintrusive_EllenBinTreeSet_rcu_find_val + + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \ref value_type. + + The function applies RCU lock internally. + */ + template + bool find( Q const & val ) const + { + return base_class::find( val ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_rcu_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred ) const + { + return base_class::find_with( val, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); + } + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_EllenBinTreeSet_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns \p NULL. + + RCU should be locked before call the function. + Returned pointer is valid while RCU is locked. + */ + template + value_type * get( Q const& key ) const + { + leaf_node * pNode = base_class::get( key ); + return pNode ? &pNode->m_Value : null_ptr(); + } + + /// Finds \p key with \p pred predicate and return the item found + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type + and \p Q in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + value_type * get_with( Q const& key, Less pred ) const + { + leaf_node * pNode = base_class::get_with( key, + cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); + return pNode ? &pNode->m_Value : null_ptr(); + } + + /// Clears the set (non-atomic) + /** + The function unlink all items from the tree. + The function is not atomic, thus, in multi-threaded environment with parallel insertions + this sequence + \code + set.clear(); + assert( set.empty() ); + \endcode + the assertion could be raised. + + For each leaf the \ref disposer will be called after unlinking. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the set is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + /** + Only leaf nodes containing user data are counted. + + The value returned depends on item counter type provided by \p Traits template parameter. 
+ If it is atomicity::empty_item_counter this function always returns 0. + Therefore, the function is not suitable for checking the tree emptiness, use \ref empty + member function for this purpose. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Checks internal consistency (not atomic, not thread-safe) + /** + The debugging function to check internal consistency of the tree. + */ + bool check_consistency() const + { + return base_class::check_consistency(); + } + + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_ELLEN_BINTREE_SET_RCU_H diff --git a/cds/container/fcdeque.h b/cds/container/fcdeque.h new file mode 100644 index 00000000..452d7a51 --- /dev/null +++ b/cds/container/fcdeque.h @@ -0,0 +1,481 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_FCDEQUE_H +#define __CDS_CONTAINER_FCDEQUE_H + +#include +#include +#include + +namespace cds { namespace container { + + /// FCDeque related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace fcdeque { + + /// FCDeque internal statistics + template + struct stat: public cds::algo::flat_combining::stat + { + typedef cds::algo::flat_combining::stat flat_combining_stat; ///< Flat-combining statistics + typedef typename flat_combining_stat::counter_type counter_type; ///< Counter type + + counter_type m_nPushFront ; ///< Count of push_front operations + counter_type m_nPushFrontMove ; ///< Count of push_front operations with move semantics + counter_type m_nPushBack ; ///< Count of push_back operations + counter_type m_nPushBackMove ; ///< Count of push_back operations with move semantics + counter_type m_nPopFront ; ///< Count of success pop_front operations + counter_type m_nFailedPopFront; ///< Count of failed pop_front operations (pop from empty deque) + counter_type m_nPopBack ; ///< Count of success pop_back operations + counter_type m_nFailedPopBack ; ///< Count of failed pop_back operations (pop from empty deque) + counter_type m_nCollided ; ///< How many pairs of push/pop were collided, if elimination is enabled + + //@cond + void onPushFront() { ++m_nPushFront; } + void onPushFrontMove() { ++m_nPushFrontMove; } + void onPushBack() { ++m_nPushBack; } + void onPushBackMove() { ++m_nPushBackMove; } + void onPopFront( bool bFailed ) { if ( bFailed ) ++m_nFailedPopFront; else ++m_nPopFront; } + void onPopBack( bool bFailed ) { if ( bFailed ) ++m_nFailedPopBack; else ++m_nPopBack; } + void onCollide() { ++m_nCollided; } + //@endcond + }; + + /// FCDeque dummy statistics, no overhead + struct empty_stat: public cds::algo::flat_combining::empty_stat + { + //@cond + void onPushFront() {} + void onPushFrontMove() {} + void onPushBack() {} + void onPushBackMove() {} + void onPopFront(bool) {} + void onPopBack(bool) {} + void onCollide() {} + //@endcond + }; + + /// FCDeque type traits + struct type_traits: public cds::algo::flat_combining::type_traits + { + typedef empty_stat stat; ///< Internal statistics + static CDS_CONSTEXPR_CONST bool enable_elimination = false; ///< Enable \ref cds_elimination_description "elimination" + }; + + /// Metafunction converting option list to traits + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + \p Options are: + - \p opt::lock_type - mutex type, default is \p cds::lock::Spin + - \p opt::back_off - back-off strategy, defalt is \p cds::backoff::Default + - \p opt::allocator - allocator 
type, default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default) + - \p opt::memory_model - C++ memory ordering model. + List of all available memory ordering see opt::memory_model. + Default if cds::opt::v:relaxed_ordering + - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" + By default, the elimination is disabled. For queue, the elimination is possible if the queue + is empty. + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS8 >::type + ,CDS_OPTIONS8 + >::type type; +# endif + }; + + } // namespace fcqueue + + /// Flat-combining deque + /** + @ingroup cds_nonintrusive_deque + @ingroup cds_flat_combining_container + + \ref cds_flat_combining_description "Flat combining" sequential deque. + The class can be considered as a concurrent FC-based wrapper for \p std::deque. + + Template parameters: + - \p T - a value type stored in the deque + - \p Deque - sequential deque implementation, for example, \p std::deque (the default) + or \p boost::container::deque + - \p Trats - type traits of flat combining, default is \p fcdeque::type_traits. + \p fcdeque::make_traits metafunction can be used to construct specialized \p %type_traits + */ + template , + typename Traits = fcdeque::type_traits + > + class FCDeque +#ifndef CDS_DOXYGEN_INVOKED + : public cds::algo::flat_combining::container +#endif + { + public: + typedef T value_type; ///< Value type + typedef Deque deque_type; ///< Sequential deque class + typedef Traits type_traits; ///< Deque type traits + + typedef typename type_traits::stat stat; ///< Internal statistics type + static CDS_CONSTEXPR_CONST bool c_bEliminationEnabled = type_traits::enable_elimination; ///< \p true if elimination is enabled + + protected: + //@cond + /// Deque operation IDs + enum fc_operation { + op_push_front = cds::algo::flat_combining::req_Operation, ///< Push front + op_push_front_move, ///< Push front (move semantics) + op_push_back, ///< Push back + op_push_back_move, ///< Push back (move semantics) + op_pop_front, ///< Pop front + op_pop_back, ///< Pop back + op_clear ///< Clear + }; + + /// Flat combining publication list record + struct fc_record: public cds::algo::flat_combining::publication_record + { + union { + value_type const * pValPush; ///< Value to push + value_type * pValPop; ///< Pop destination + }; + bool bEmpty; ///< \p true if the deque is empty + }; + //@endcond + + /// Flat combining kernel + typedef cds::algo::flat_combining::kernel< fc_record, type_traits > fc_kernel; + + protected: + //@cond + fc_kernel m_FlatCombining; + deque_type m_Deque; + //@endcond + + public: + /// Initializes empty deque object + FCDeque() + {} + + /// Initializes empty deque object and gives flat combining parameters + FCDeque( + unsigned int nCompactFactor ///< Flat combining: publication list compacting factor + ,unsigned int nCombinePassCount ///< Flat combining: number of combining passes for combiner thread + ) + : m_FlatCombining( nCompactFactor, nCombinePassCount ) + {} + + /// Inserts a new element at the beginning of the deque container + /** + The function always returns \p true + */ + bool push_front( + value_type const& val ///< Value to be copied to inserted element + ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); 
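+            // The acquired publication record is this thread's slot in the flat-combining
+            // publication list: the current combiner (possibly this thread) reads pValPush
+            // from the record and applies the push_front operation on its behalf.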
+ pRec->pValPush = &val; + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_push_front, pRec, *this ); + else + m_FlatCombining.combine( op_push_front, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPushFront(); + return true; + } + +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + /// Inserts a new element at the beginning of the deque container (move semantics) + /** + The function always returns \p true + */ + bool push_front( + value_type&& val ///< Value to be moved to inserted element + ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); + pRec->pValPush = &val; + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_push_front_move, pRec, *this ); + else + m_FlatCombining.combine( op_push_front_move, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPushFrontMove(); + return true; + } +# endif + + /// Inserts a new element at the end of the deque container + /** + The function always returns \p true + */ + bool push_back( + value_type const& val ///< Value to be copied to inserted element + ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); + pRec->pValPush = &val; + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_push_back, pRec, *this ); + else + m_FlatCombining.combine( op_push_back, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPushBack(); + return true; + } + +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + /// Inserts a new element at the end of the deque container (move semantics) + /** + The function always returns \p true + */ + bool push_back( + value_type&& val ///< Value to be moved to inserted element + ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); + pRec->pValPush = &val; + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_push_back_move, pRec, *this ); + else + m_FlatCombining.combine( op_push_back_move, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPushBackMove(); + return true; + } +# endif + + /// Removes the first element in the deque container + /** + The function returns \p false if the deque is empty, \p true otherwise. + If the deque is empty \p val is not changed. + */ + bool pop_front( + value_type& val ///< Target to be received the copy of removed element + ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); + pRec->pValPop = &val; + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_pop_front, pRec, *this ); + else + m_FlatCombining.combine( op_pop_front, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPopFront( pRec->bEmpty ); + return !pRec->bEmpty; + } + + /// Removes the last element in the deque container + /** + The function returns \p false if the deque is empty, \p true otherwise. + If the deque is empty \p val is not changed. 
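+
+            A minimal single-threaded usage sketch (illustrative only; an \p %FCDeque<int> with the default \p std::deque and default traits is assumed):
+            \code
+            cds::container::FCDeque<int> dq;
+            dq.push_front( 1 );
+            dq.push_back( 2 );   // the deque now contains 1, 2
+
+            int v;
+            dq.pop_back( v );    // v == 2
+            dq.pop_back( v );    // v == 1, the deque becomes empty
+            dq.pop_back( v );    // returns false, v is left unchanged
+            \endcode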
+ */ + bool pop_back( + value_type& val ///< Target to be received the copy of removed element + ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); + pRec->pValPop = &val; + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_pop_back, pRec, *this ); + else + m_FlatCombining.combine( op_pop_back, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPopBack( pRec->bEmpty ); + return !pRec->bEmpty; + } + + /// Clears the deque + void clear() + { + fc_record * pRec = m_FlatCombining.acquire_record(); + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_clear, pRec, *this ); + else + m_FlatCombining.combine( op_clear, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + } + + /// Returns the number of elements in the deque. + /** + Note that size() == 0 is not mean that the deque is empty because + combining record can be in process. + To check emptiness use \ref empty function. + */ + size_t size() const + { + return m_Deque.size(); + } + + /// Checks if the deque is empty + /** + If the combining is in process the function waits while combining done. + */ + bool empty() const + { + m_FlatCombining.wait_while_combining(); + return m_Deque.empty(); + } + + /// Internal statistics + stat const& statistics() const + { + return m_FlatCombining.statistics(); + } + + public: // flat combining cooperation, not for direct use! + //@cond + /// Flat combining supporting function. Do not call it directly! + /** + The function is called by \ref cds::algo::flat_combining::kernel "flat combining kernel" + object if the current thread becomes a combiner. Invocation of the function means that + the deque should perform an action recorded in \p pRec. 
+ */ + void fc_apply( fc_record * pRec ) + { + assert( pRec ); + + switch ( pRec->op() ) { + case op_push_front: + assert( pRec->pValPush ); + m_Deque.push_front( *(pRec->pValPush) ); + break; +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + case op_push_front_move: + assert( pRec->pValPush ); + m_Deque.push_front( std::move( *(pRec->pValPush )) ); + break; +# endif + case op_push_back: + assert( pRec->pValPush ); + m_Deque.push_back( *(pRec->pValPush) ); + break; +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + case op_push_back_move: + assert( pRec->pValPush ); + m_Deque.push_back( std::move( *(pRec->pValPush )) ); + break; +# endif + case op_pop_front: + assert( pRec->pValPop ); + pRec->bEmpty = m_Deque.empty(); + if ( !pRec->bEmpty ) { + *(pRec->pValPop) = m_Deque.front(); + m_Deque.pop_front(); + } + break; + case op_pop_back: + assert( pRec->pValPop ); + pRec->bEmpty = m_Deque.empty(); + if ( !pRec->bEmpty ) { + *(pRec->pValPop) = m_Deque.back(); + m_Deque.pop_back(); + } + break; + case op_clear: + while ( !m_Deque.empty() ) + m_Deque.pop_front(); + break; + default: + assert(false); + break; + } + } + + /// Batch-processing flat combining + void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd ) + { + typedef typename fc_kernel::iterator fc_iterator; + for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) { + switch ( it->op() ) { + case op_push_front: + case op_push_front_move: + if ( itPrev != itEnd + && (itPrev->op() == op_pop_front || ( m_Deque.empty() && itPrev->op() == op_pop_back ))) + { + collide( *it, *itPrev ); + itPrev = itEnd; + } + else + itPrev = it; + break; + case op_push_back: + case op_push_back_move: + if ( itPrev != itEnd + && (itPrev->op() == op_pop_back || ( m_Deque.empty() && itPrev->op() == op_pop_front ))) + { + collide( *it, *itPrev ); + itPrev = itEnd; + } + else + itPrev = it; + break; + case op_pop_front: + if ( itPrev != itEnd + && ( itPrev->op() == op_push_front || itPrev->op() == op_push_front_move + || ( m_Deque.empty() && ( itPrev->op() == op_push_back || itPrev->op() == op_push_back_move )))) + { + collide( *itPrev, *it ); + itPrev = itEnd; + } + else + itPrev = it; + break; + case op_pop_back: + if ( itPrev != itEnd + && ( itPrev->op() == op_push_back || itPrev->op() == op_push_back_move + || ( m_Deque.empty() && ( itPrev->op() == op_push_front || itPrev->op() == op_push_front_move )))) + { + collide( *itPrev, *it ); + itPrev = itEnd; + } + else + itPrev = it; + break; + } + } + } + //@endcond + + private: + //@cond + void collide( fc_record& recPush, fc_record& recPop ) + { + *(recPop.pValPop) = *(recPush.pValPush); + recPop.bEmpty = false; + m_FlatCombining.operation_done( recPush ); + m_FlatCombining.operation_done( recPop ); + m_FlatCombining.internal_statistics().onCollide(); + } + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_FCDEQUE_H diff --git a/cds/container/fcpriority_queue.h b/cds/container/fcpriority_queue.h new file mode 100644 index 00000000..740b7c51 --- /dev/null +++ b/cds/container/fcpriority_queue.h @@ -0,0 +1,293 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_FCPRIORITY_QUEUE_H +#define __CDS_CONTAINER_FCPRIORITY_QUEUE_H + +#include +#include +#include + +namespace cds { namespace container { + + /// FCPriorityQueue related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace fcpqueue { + + /// FCPriorityQueue internal statistics + template + struct stat: public cds::algo::flat_combining::stat + { + typedef cds::algo::flat_combining::stat 
flat_combining_stat; ///< Flat-combining statistics + typedef typename flat_combining_stat::counter_type counter_type; ///< Counter type + + counter_type m_nPush ; ///< Count of push operations + counter_type m_nPushMove ; ///< Count of push operations with move semantics + counter_type m_nPop ; ///< Count of success pop operations + counter_type m_nFailedPop; ///< Count of failed pop operations (pop from empty queue) + + //@cond + void onPush() { ++m_nPush; } + void onPushMove() { ++m_nPushMove; } + void onPop( bool bFailed ) { if ( bFailed ) ++m_nFailedPop; else ++m_nPop; } + //@endcond + }; + + /// FCPriorityQueue dummy statistics, no overhead + struct empty_stat: public cds::algo::flat_combining::empty_stat + { + //@cond + void onPush() {} + void onPushMove() {} + void onPop(bool) {} + //@endcond + }; + + /// FCPriorityQueue type traits + struct type_traits: public cds::algo::flat_combining::type_traits + { + typedef empty_stat stat; ///< Internal statistics + }; + + /// Metafunction converting option list to traits + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + \p Options are: + - \p opt::lock_type - mutex type, default is \p cds::lock::Spin + - \p opt::back_off - back-off strategy, defalt is \p cds::backoff::Default + - \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default) + - \p opt::memory_model - C++ memory ordering model. + List of all available memory ordering see opt::memory_model. + Default is cds::opt::v:relaxed_ordering + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS7 >::type + ,CDS_OPTIONS7 + >::type type; +# endif + }; + + } // namespace fcpqueue + + /// Flat-combining priority queue + /** + @ingroup cds_nonintrusive_priority_queue + @ingroup cds_flat_combining_container + + \ref cds_flat_combining_description "Flat combining" sequential priority queue. + The class can be considered as a concurrent FC-based wrapper for \p std::priority_queue. + + Template parameters: + - \p T - a value type stored in the queue + - \p PriorityQueue - sequential priority queue implementation, default is \p std::priority_queue + - \p Traits - type traits of flat combining, default is \p fcpqueue::type_traits. 
+ \p fcpqueue::make_traits metafunction can be used to construct specialized \p %type_traits + */ + template , + typename Traits = fcpqueue::type_traits + > + class FCPriorityQueue +#ifndef CDS_DOXYGEN_INVOKED + : public cds::algo::flat_combining::container +#endif + { + public: + typedef T value_type; ///< Value type + typedef PriorityQueue priority_queue_type; ///< Sequential priority queue class + typedef Traits type_traits; ///< Priority queue type traits + + typedef typename type_traits::stat stat; ///< Internal statistics type + + protected: + //@cond + // Priority queue operation IDs + enum fc_operation { + op_push = cds::algo::flat_combining::req_Operation, + op_push_move, + op_pop, + op_clear + }; + + // Flat combining publication list record + struct fc_record: public cds::algo::flat_combining::publication_record + { + union { + value_type const * pValPush; // Value to push + value_type * pValPop; // Pop destination + }; + bool bEmpty; // true if the queue is empty + }; + //@endcond + + /// Flat combining kernel + typedef cds::algo::flat_combining::kernel< fc_record, type_traits > fc_kernel; + + protected: + //@cond + fc_kernel m_FlatCombining; + priority_queue_type m_PQueue; + //@endcond + + public: + /// Initializes empty priority queue object + FCPriorityQueue() + {} + + /// Initializes empty priority queue object and gives flat combining parameters + FCPriorityQueue( + unsigned int nCompactFactor ///< Flat combining: publication list compacting factor + ,unsigned int nCombinePassCount ///< Flat combining: number of combining passes for combiner thread + ) + : m_FlatCombining( nCompactFactor, nCombinePassCount ) + {} + + /// Inserts a new element in the priority queue + /** + The function always returns \p true + */ + bool push( + value_type const& val ///< Value to be copied to inserted element + ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); + pRec->pValPush = &val; + + m_FlatCombining.combine( op_push, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPush(); + return true; + } + +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + /// Inserts a new element in the priority queue (move semantics) + /** + The function always returns \p true + */ + bool push( + value_type&& val ///< Value to be moved to inserted element + ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); + pRec->pValPush = &val; + + m_FlatCombining.combine( op_push_move, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPushMove(); + return true; + } +# endif + + /// Removes the top element from priority queue + /** + The function returns \p false if the queue is empty, \p true otherwise. + If the queue is empty \p val is not changed. + */ + bool pop( + value_type& val ///< Target to be received the copy of top element + ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); + pRec->pValPop = &val; + + m_FlatCombining.combine( op_pop, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPop( pRec->bEmpty ); + return !pRec->bEmpty; + } + + /// Clears the priority queue + void clear() + { + fc_record * pRec = m_FlatCombining.acquire_record(); + + m_FlatCombining.combine( op_clear, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + } + + /// Returns the number of elements in the priority queue. 
+ /** + Note that size() == 0 does not mean that the queue is empty because + combining record can be in process. + To check emptiness use \ref empty function. + */ + size_t size() const + { + return m_PQueue.size(); + } + + /// Checks if the priority queue is empty + /** + If the combining is in process the function waits while combining done. + */ + bool empty() const + { + m_FlatCombining.wait_while_combining(); + return m_PQueue.empty(); + } + + /// Internal statistics + stat const& statistics() const + { + return m_FlatCombining.statistics(); + } + + public: // flat combining cooperation, not for direct use! + //@cond + /* + The function is called by \ref cds::algo::flat_combining::kernel "flat combining kernel" + object if the current thread becomes a combiner. Invocation of the function means that + the priority queue should perform an action recorded in \p pRec. + */ + void fc_apply( fc_record * pRec ) + { + assert( pRec ); + + switch ( pRec->op() ) { + case op_push: + assert( pRec->pValPush ); + m_PQueue.push( *(pRec->pValPush) ); + break; +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + case op_push_move: + assert( pRec->pValPush ); + m_PQueue.push( std::move( *(pRec->pValPush )) ); + break; +# endif + case op_pop: + assert( pRec->pValPop ); + pRec->bEmpty = m_PQueue.empty(); + if ( !pRec->bEmpty ) { + *(pRec->pValPop) = m_PQueue.top(); + m_PQueue.pop(); + } + break; + case op_clear: + while ( !m_PQueue.empty() ) + m_PQueue.pop(); + break; + default: + assert(false); + break; + } + } + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_FCPRIORITY_QUEUE_H diff --git a/cds/container/fcqueue.h b/cds/container/fcqueue.h new file mode 100644 index 00000000..b9eb75b8 --- /dev/null +++ b/cds/container/fcqueue.h @@ -0,0 +1,393 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_FCQUEUE_H +#define __CDS_CONTAINER_FCQUEUE_H + +#include +#include +#include + +namespace cds { namespace container { + + /// FCQueue related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace fcqueue { + + /// FCQueue internal statistics + template + struct stat: public cds::algo::flat_combining::stat + { + typedef cds::algo::flat_combining::stat flat_combining_stat; ///< Flat-combining statistics + typedef typename flat_combining_stat::counter_type counter_type; ///< Counter type + + counter_type m_nEnqueue ; ///< Count of enqueue operations + counter_type m_nEnqMove ; ///< Count of enqueue operations with move semantics + counter_type m_nDequeue ; ///< Count of success dequeue operations + counter_type m_nFailedDeq ; ///< Count of failed dequeue operations (pop from empty queue) + counter_type m_nCollided ; ///< How many pairs of enqueue/dequeue were collided, if elimination is enabled + + //@cond + void onEnqueue() { ++m_nEnqueue; } + void onEnqMove() { ++m_nEnqMove; } + void onDequeue( bool bFailed ) { if ( bFailed ) ++m_nFailedDeq; else ++m_nDequeue; } + void onCollide() { ++m_nCollided; } + //@endcond + }; + + /// FCQueue dummy statistics, no overhead + struct empty_stat: public cds::algo::flat_combining::empty_stat + { + //@cond + void onEnqueue() {} + void onEnqMove() {} + void onDequeue(bool) {} + void onCollide() {} + //@endcond + }; + + /// FCQueue type traits + struct type_traits: public cds::algo::flat_combining::type_traits + { + typedef empty_stat stat; ///< Internal statistics + static CDS_CONSTEXPR_CONST bool enable_elimination = false; ///< Enable \ref cds_elimination_description "elimination" + }; + + /// Metafunction converting option list to traits + /** 
+ This is a wrapper for cds::opt::make_options< type_traits, Options...> + \p Options are: + - \p opt::lock_type - mutex type, default is \p cds::lock::Spin + - \p opt::back_off - back-off strategy, defalt is \p cds::backoff::Default + - \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default) + - \p opt::memory_model - C++ memory ordering model. + List of all available memory ordering see opt::memory_model. + Default if cds::opt::v:relaxed_ordering + - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" + By default, the elimination is disabled. For queue, the elimination is possible if the queue + is empty. + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS8 >::type + ,CDS_OPTIONS8 + >::type type; +# endif + }; + + } // namespace fcqueue + + /// Flat-combining queue + /** + @ingroup cds_nonintrusive_queue + @ingroup cds_flat_combining_container + + \ref cds_flat_combining_description "Flat combining" sequential queue. + The class can be considered as a concurrent FC-based wrapper for \p std::queue. + + Template parameters: + - \p T - a value type stored in the queue + - \p Queue - sequential queue implementation, default is \p std::queue + - \p Trats - type traits of flat combining, default is \p fcqueue::type_traits. + \p fcqueue::make_traits metafunction can be used to construct specialized \p %type_traits + */ + template , + typename Traits = fcqueue::type_traits + > + class FCQueue +#ifndef CDS_DOXYGEN_INVOKED + : public cds::algo::flat_combining::container +#endif + { + public: + typedef T value_type; ///< Value type + typedef Queue queue_type; ///< Sequential queue class + typedef Traits type_traits; ///< Queue type traits + + typedef typename type_traits::stat stat; ///< Internal statistics type + static CDS_CONSTEXPR_CONST bool c_bEliminationEnabled = type_traits::enable_elimination; ///< \p true if elimination is enabled + + protected: + //@cond + /// Queue operation IDs + enum fc_operation { + op_enq = cds::algo::flat_combining::req_Operation, ///< Enqueue + op_enq_move, ///< Enqueue (move semantics) + op_deq, ///< Dequeue + op_clear ///< Clear + }; + + /// Flat combining publication list record + struct fc_record: public cds::algo::flat_combining::publication_record + { + union { + value_type const * pValEnq; ///< Value to enqueue + value_type * pValDeq; ///< Dequeue destination + }; + bool bEmpty; ///< \p true if the queue is empty + }; + //@endcond + + /// Flat combining kernel + typedef cds::algo::flat_combining::kernel< fc_record, type_traits > fc_kernel; + + protected: + //@cond + fc_kernel m_FlatCombining; + queue_type m_Queue; + //@endcond + + public: + /// Initializes empty queue object + FCQueue() + {} + + /// Initializes empty queue object and gives flat combining parameters + FCQueue( + unsigned int nCompactFactor ///< Flat combining: publication list compacting factor + ,unsigned int nCombinePassCount ///< Flat combining: number of combining passes for combiner thread + ) + : m_FlatCombining( nCompactFactor, nCombinePassCount ) + {} + + /// Inserts a new element at the end of the queue + /** + The content of the new element initialized to a copy of \p val. 
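+
+            A usage sketch (illustrative only; the traits expansion below is an assumption based on the option list above, not a verbatim library example):
+            \code
+            // FCQueue of int with elimination enabled via fcqueue::make_traits
+            typedef cds::container::FCQueue< int, std::queue<int>,
+                cds::container::fcqueue::make_traits<
+                    cds::opt::enable_elimination< true >
+                >::type
+            > queue_type;
+
+            queue_type q;
+            q.enqueue( 10 );
+            q.push( 20 );      // push() is a synonym for enqueue()
+
+            int v;
+            q.dequeue( v );    // v == 10 (FIFO order)
+            \endcode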
+ + The function always returns \p true + */ + bool enqueue( value_type const& val ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); + pRec->pValEnq = &val; + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_enq, pRec, *this ); + else + m_FlatCombining.combine( op_enq, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onEnqueue(); + return true; + } + + /// Inserts a new element at the end of the queue (a synonym for \ref enqueue) + bool push( value_type const& val ) + { + return enqueue( val ); + } + +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + /// Inserts a new element at the end of the queue (move semantics) + /** + \p val is moved to inserted element + */ + bool enqueue( value_type&& val ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); + pRec->pValEnq = &val; + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_enq_move, pRec, *this ); + else + m_FlatCombining.combine( op_enq_move, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + + m_FlatCombining.internal_statistics().onEnqMove(); + return true; + } + + /// Inserts a new element at the end of the queue (move semantics, synonym for \p enqueue) + bool push( value_type&& val ) + { + return enqueue( val ); + } +# endif + + /// Removes the next element from the queue + /** + \p val takes a copy of the element + */ + bool dequeue( value_type& val ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); + pRec->pValDeq = &val; + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_deq, pRec, *this ); + else + m_FlatCombining.combine( op_deq, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + + m_FlatCombining.internal_statistics().onDequeue( pRec->bEmpty ); + return !pRec->bEmpty; + } + + /// Removes the next element from the queue (a synonym for \ref dequeue) + bool pop( value_type& val ) + { + return dequeue( val ); + } + + /// Clears the queue + void clear() + { + fc_record * pRec = m_FlatCombining.acquire_record(); + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_clear, pRec, *this ); + else + m_FlatCombining.combine( op_clear, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + } + + /// Returns the number of elements in the queue. + /** + Note that size() == 0 is not mean that the queue is empty because + combining record can be in process. + To check emptiness use \ref empty function. + */ + size_t size() const + { + return m_Queue.size(); + } + + /// Checks if the queue is empty + /** + If the combining is in process the function waits while combining done. + */ + bool empty() const + { + m_FlatCombining.wait_while_combining(); + return m_Queue.empty(); + } + + /// Internal statistics + stat const& statistics() const + { + return m_FlatCombining.statistics(); + } + + public: // flat combining cooperation, not for direct use! + //@cond + /// Flat combining supporting function. Do not call it directly! + /** + The function is called by \ref cds::algo::flat_combining::kernel "flat combining kernel" + object if the current thread becomes a combiner. Invocation of the function means that + the queue should perform an action recorded in \p pRec. 
+ */ + void fc_apply( fc_record * pRec ) + { + assert( pRec ); + + switch ( pRec->op() ) { + case op_enq: + assert( pRec->pValEnq ); + m_Queue.push( *(pRec->pValEnq ) ); + break; +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + case op_enq_move: + assert( pRec->pValEnq ); + m_Queue.push( std::move( *(pRec->pValEnq )) ); + break; +# endif + case op_deq: + assert( pRec->pValDeq ); + pRec->bEmpty = m_Queue.empty(); + if ( !pRec->bEmpty ) { + *(pRec->pValDeq) = m_Queue.front(); + m_Queue.pop(); + } + break; + case op_clear: + while ( !m_Queue.empty() ) + m_Queue.pop(); + break; + default: + assert(false); + break; + } + } + + /// Batch-processing flat combining + void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd ) + { + typedef typename fc_kernel::iterator fc_iterator; + for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) { + switch ( it->op() ) { + case op_enq: + case op_enq_move: + case op_deq: + if ( m_Queue.empty() ) { + if ( itPrev != itEnd && collide( *itPrev, *it )) + itPrev = itEnd; + else + itPrev = it; + } + break; + } + } + } + //@endcond + + private: + //@cond + bool collide( fc_record& rec1, fc_record& rec2 ) + { + switch ( rec1.op() ) { + case op_enq: + if ( rec2.op() == op_deq ) { + assert(rec1.pValEnq); + assert(rec2.pValDeq); + *rec2.pValDeq = *rec1.pValEnq; + rec2.bEmpty = false; + goto collided; + } + break; +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + case op_enq_move: + if ( rec2.op() == op_deq ) { + assert(rec1.pValEnq); + assert(rec2.pValDeq); + *rec2.pValDeq = std::move( *rec1.pValEnq ); + rec2.bEmpty = false; + goto collided; + } + break; +# endif + case op_deq: + switch ( rec2.op() ) { + case op_enq: +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + case op_enq_move: +# endif + return collide( rec2, rec1 ); + } + } + return false; + + collided: + m_FlatCombining.operation_done( rec1 ); + m_FlatCombining.operation_done( rec2 ); + m_FlatCombining.internal_statistics().onCollide(); + return true; + } + //@endcond + + }; +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_FCQUEUE_H diff --git a/cds/container/fcstack.h b/cds/container/fcstack.h new file mode 100644 index 00000000..fb9cbbd5 --- /dev/null +++ b/cds/container/fcstack.h @@ -0,0 +1,369 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_FCSTACK_H +#define __CDS_CONTAINER_FCSTACK_H + +#include +#include +#include + +namespace cds { namespace container { + + /// FCStack related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace fcstack { + + /// FCStack internal statistics + template + struct stat: public cds::algo::flat_combining::stat + { + typedef cds::algo::flat_combining::stat flat_combining_stat; ///< Flat-combining statistics + typedef typename flat_combining_stat::counter_type counter_type; ///< Counter type + + counter_type m_nPush ; ///< Count of push operations + counter_type m_nPushMove ; ///< Count of push operations with move semantics + counter_type m_nPop ; ///< Count of success pop operations + counter_type m_nFailedPop; ///< Count of failed pop operations (pop from empty stack) + counter_type m_nCollided ; ///< How many pairs of push/pop were collided, if elimination is enabled + + //@cond + void onPush() { ++m_nPush; } + void onPushMove() { ++m_nPushMove; } + void onPop( bool bFailed ) { if ( bFailed ) ++m_nFailedPop; else ++m_nPop; } + void onCollide() { ++m_nCollided; } + //@endcond + }; + + /// FCStack dummy statistics, no overhead + struct empty_stat: public cds::algo::flat_combining::empty_stat + { + //@cond + void onPush() {} + void 
onPushMove() {} + void onPop(bool) {} + void onCollide() {} + //@endcond + }; + + /// FCStack type traits + struct type_traits: public cds::algo::flat_combining::type_traits + { + typedef empty_stat stat; ///< Internal statistics + static CDS_CONSTEXPR_CONST bool enable_elimination = false; ///< Enable \ref cds_elimination_description "elimination" + }; + + /// Metafunction converting option list to traits + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + \p Options are: + - \p opt::lock_type - mutex type, default is \p cds::lock::Spin + - \p opt::back_off - back-off strategy, defalt is \p cds::backoff::Default + - \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default) + - \p opt::memory_model - C++ memory ordering model. + List of all available memory ordering see opt::memory_model. + Default if cds::opt::v:relaxed_ordering + - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" + By default, the elimination is disabled. + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS8 >::type + ,CDS_OPTIONS8 + >::type type; +# endif + }; + + } // namespace fcstack + + /// Flat-combining stack + /** + @ingroup cds_nonintrusive_stack + @ingroup cds_flat_combining_container + + \ref cds_flat_combining_description "Flat combining" sequential stack. + + Template parameters: + - \p T - a value type stored in the stack + - \p Stack - sequential stack implementation, default is \p std::stack + - \p Trats - type traits of flat combining, default is \p fcstack::type_traits + \p fcstack::make_traits metafunction can be used to construct specialized \p %type_traits + */ + template , + typename Traits = fcstack::type_traits + > + class FCStack +#ifndef CDS_DOXYGEN_INVOKED + : public cds::algo::flat_combining::container +#endif + { + public: + typedef T value_type; ///< Value type + typedef Stack stack_type; ///< Sequential stack class + typedef Traits type_traits; ///< Stack type traits + + typedef typename type_traits::stat stat; ///< Internal statistics type + static CDS_CONSTEXPR_CONST bool c_bEliminationEnabled = type_traits::enable_elimination; ///< \p true if elimination is enabled + + protected: + //@cond + /// Stack operation IDs + enum fc_operation { + op_push = cds::algo::flat_combining::req_Operation, ///< Push + op_push_move, ///< Push (move semantics) + op_pop, ///< Pop + op_clear ///< Clear + }; + + /// Flat combining publication list record + struct fc_record: public cds::algo::flat_combining::publication_record + { + union { + value_type const * pValPush; ///< Value to push + value_type * pValPop; ///< Pop destination + }; + bool bEmpty; ///< \p true if the stack is empty + }; + //@endcond + + /// Flat combining kernel + typedef cds::algo::flat_combining::kernel< fc_record, type_traits > fc_kernel; + + protected: + //@cond + fc_kernel m_FlatCombining; + stack_type m_Stack; + //@endcond + + public: + /// Initializes empty stack object + FCStack() + {} + + /// Initializes empty stack object and gives flat combining parameters + FCStack( + unsigned int nCompactFactor ///< Flat combining: publication list compacting factor + ,unsigned int nCombinePassCount ///< Flat combining: number of combining passes for combiner 
thread + ) + : m_FlatCombining( nCompactFactor, nCombinePassCount ) + {} + + /// Inserts a new element at the top of stack + /** + The content of the new element initialized to a copy of \p val. + */ + bool push( value_type const& val ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); + pRec->pValPush = &val; + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_push, pRec, *this ); + else + m_FlatCombining.combine( op_push, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPush(); + return true; + } + +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + /// Inserts a new element at the top of stack (move semantics) + /** + The content of the new element initialized to a copy of \p val. + */ + bool push( value_type&& val ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); + pRec->pValPush = &val; + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_push_move, pRec, *this ); + else + m_FlatCombining.combine( op_push_move, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + + m_FlatCombining.internal_statistics().onPushMove(); + return true; + } +# endif + + /// Removes the element on top of the stack + /** + \p val takes a copy of top element + */ + bool pop( value_type& val ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); + pRec->pValPop = &val; + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_pop, pRec, *this ); + else + m_FlatCombining.combine( op_pop, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + + m_FlatCombining.internal_statistics().onPop( pRec->bEmpty ); + return !pRec->bEmpty; + } + + /// Clears the stack + void clear() + { + fc_record * pRec = m_FlatCombining.acquire_record(); + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_clear, pRec, *this ); + else + m_FlatCombining.combine( op_clear, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + } + + /// Returns the number of elements in the stack. + /** + Note that size() == 0 is not mean that the stack is empty because + combining record can be in process. + To check emptiness use \ref empty function. + */ + size_t size() const + { + return m_Stack.size(); + } + + /// Checks if the stack is empty + /** + If the combining is in process the function waits while combining done. + */ + bool empty() const + { + m_FlatCombining.wait_while_combining(); + return m_Stack.empty(); + } + + /// Internal statistics + stat const& statistics() const + { + return m_FlatCombining.statistics(); + } + + + public: // flat combining cooperation, not for direct use! + //@cond + /// Flat combining supporting function. Do not call it directly! + /** + The function is called by \ref cds::algo::flat_combining::kernel "flat combining kernel" + object if the current thread becomes a combiner. Invocation of the function means that + the stack should perform an action recorded in \p pRec. 
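+
+            For example (illustrative sketch), a caller's
+            \code
+            cds::container::FCStack<int> s;
+            s.push( 42 );   // publishes an op_push record
+            int v;
+            s.pop( v );     // publishes an op_pop record
+            \endcode
+            is eventually executed by this function as \p m_Stack.push() / \p m_Stack.pop() on behalf of the publishing threads
+            (or the push/pop pair may be eliminated by \p fc_process without touching \p m_Stack).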
+ */ + void fc_apply( fc_record * pRec ) + { + assert( pRec ); + + switch ( pRec->op() ) { + case op_push: + assert( pRec->pValPush ); + m_Stack.push( *(pRec->pValPush ) ); + break; +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + case op_push_move: + assert( pRec->pValPush ); + m_Stack.push( std::move( *(pRec->pValPush )) ); + break; +# endif + case op_pop: + assert( pRec->pValPop ); + pRec->bEmpty = m_Stack.empty(); + if ( !pRec->bEmpty ) { + *(pRec->pValPop) = m_Stack.top(); + m_Stack.pop(); + } + break; + case op_clear: + while ( !m_Stack.empty() ) + m_Stack.pop(); + break; + default: + assert(false); + break; + } + } + + /// Batch-processing flat combining + void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd ) + { + typedef typename fc_kernel::iterator fc_iterator; + for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) { + switch ( it->op() ) { + case op_push: + case op_push_move: + case op_pop: + if ( itPrev != itEnd && collide( *itPrev, *it )) + itPrev = itEnd; + else + itPrev = it; + break; + } + } + } + //@endcond + + private: + //@cond + bool collide( fc_record& rec1, fc_record& rec2 ) + { + switch ( rec1.op() ) { + case op_push: + if ( rec2.op() == op_pop ) { + assert(rec1.pValPush); + assert(rec2.pValPop); + *rec2.pValPop = *rec1.pValPush; + rec2.bEmpty = false; + goto collided; + } + break; +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + case op_push_move: + if ( rec2.op() == op_pop ) { + assert(rec1.pValPush); + assert(rec2.pValPop); + *rec2.pValPop = std::move( *rec1.pValPush ); + rec2.bEmpty = false; + goto collided; + } + break; +# endif + case op_pop: + switch ( rec2.op() ) { + case op_push: +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + case op_push_move: +# endif + return collide( rec2, rec1 ); + } + } + return false; + + collided: + m_FlatCombining.operation_done( rec1 ); + m_FlatCombining.operation_done( rec2 ); + m_FlatCombining.internal_statistics().onCollide(); + return true; + } + //@endcond + }; +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_FCSTACK_H diff --git a/cds/container/lazy_kvlist_hp.h b/cds/container/lazy_kvlist_hp.h new file mode 100644 index 00000000..4e6a76ff --- /dev/null +++ b/cds/container/lazy_kvlist_hp.h @@ -0,0 +1,11 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_LAZY_KVLIST_HP_H +#define __CDS_CONTAINER_LAZY_KVLIST_HP_H + +#include +#include +#include +#include + +#endif // #ifndef __CDS_CONTAINER_LAZY_KVLIST_HP_H diff --git a/cds/container/lazy_kvlist_hrc.h b/cds/container/lazy_kvlist_hrc.h new file mode 100644 index 00000000..22a9d828 --- /dev/null +++ b/cds/container/lazy_kvlist_hrc.h @@ -0,0 +1,11 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_LAZY_KVLIST_HRC_H +#define __CDS_CONTAINER_LAZY_KVLIST_HRC_H + +#include +#include +#include +#include + +#endif // #ifndef __CDS_CONTAINER_LAZY_KVLIST_HRC_H diff --git a/cds/container/lazy_kvlist_impl.h b/cds/container/lazy_kvlist_impl.h new file mode 100644 index 00000000..b6a9b868 --- /dev/null +++ b/cds/container/lazy_kvlist_impl.h @@ -0,0 +1,926 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_LAZY_KVLIST_IMPL_H +#define __CDS_CONTAINER_LAZY_KVLIST_IMPL_H + +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Lazy ordered list (key-value pair) + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_LazyKVList_gc + + This is key-value variation of non-intrusive LazyList. + Like standard container, this implementation split a value stored into two part - + constant key and alterable value. 
+ + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Template arguments: + - \p GC - garbage collector used + - \p Key - key type of an item stored in the list. It should be copy-constructible + - \p Value - value type stored in the list + - \p Traits - type traits, default is lazy_list::type_traits + + It is possible to declare option-based list with cds::container::lazy_list::make_traits metafunction istead of \p Traits template + argument. For example, the following traits-based declaration of gc::HP lazy list + \code + #include + // Declare comparator for the item + struct my_compare { + int operator ()( int i1, int i2 ) + { + return i1 - i2; + } + }; + + // Declare type_traits + struct my_traits: public cds::container::lazy_list::type_traits + { + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::container::LazyKVList< cds::gc::HP, int, int, my_traits > traits_based_list; + \endcode + + is equivalent for the following option-based list + \code + #include + + // my_compare is the same + + // Declare option-based list + typedef cds::container::LazyKVList< cds::gc::HP, int, int, + typename cds::container::lazy_list::make_traits< + cds::container::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + Template argument list \p Options of cds::container::lazy_list::make_traits metafunction are: + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::allocator - the allocator used for creating and freeing list's item. Default is \ref CDS_DEFAULT_ALLOCATOR macro. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + + \par Usage + There are different specializations of this template for each garbage collecting schema used. 
+ You should include appropriate .h-file depending on GC you are using: + - for gc::HP: \code #include \endcode + - for gc::PTB: \code #include \endcode + - for gc::HRC: \code #include \endcode + - for \ref cds_urcu_desc "RCU": \code #include \endcode + - for gc::nogc: \code #include \endcode + */ + template < + typename GC, + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = lazy_list::type_traits +#else + typename Traits +#endif + > + class LazyKVList: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::LazyList< GC, implementation_defined, Traits > +#else + protected details::make_lazy_kvlist< GC, Key, Value, Traits >::type +#endif + { + //@cond + typedef details::make_lazy_kvlist< GC, Key, Value, Traits > options; + typedef typename options::type base_class; + //@endcond + + public: +#ifdef CDS_DOXYGEN_INVOKED + typedef Key key_type ; ///< Key type + typedef Value mapped_type ; ///< Type of value stored in the list + typedef std::pair value_type ; ///< key/value pair stored in the list +#else + typedef typename options::key_type key_type; + typedef typename options::value_type mapped_type; + typedef typename options::pair_type value_type; +#endif + + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename options::key_comparator key_comparator ; ///< key comparison functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename options::cxx_allocator cxx_allocator; + typedef typename options::node_deallocator node_deallocator; + typedef typename options::type_traits::compare intrusive_key_comparator; + + typedef typename base_class::node_type head_type; + //@endcond + + public: + /// Guarded pointer + typedef cds::gc::guarded_ptr< gc, node_type, value_type, details::guarded_ptr_cast_map > guarded_ptr; + + private: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + template + class insert_functor: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + insert_functor ( Func f ) + : base_class( f ) + {} + + void operator()( node_type& node ) + { + base_class::get()( node.m_Data ); + } + }; + + template + class ensure_functor: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + ensure_functor( Func f ) + : base_class(f) + {} + + void operator ()( bool bNew, node_type& node, node_type& ) + { + base_class::get()( bNew, node.m_Data ); + } + }; + + template + class find_functor: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + find_functor( Func f ) + : base_class(f) + {} + + template + void operator ()( node_type& node, Q& ) + { + base_class::get()( node.m_Data ); + } + }; + + template + struct erase_functor + { + Func m_func; + + erase_functor( Func f ) + : m_func( f ) + {} + + void operator ()( node_type const & node ) + { + cds::unref(m_func)( const_cast(node.m_Data) ); + } + }; +# endif // ifndef CDS_CXX11_LAMBDA_SUPPORT + //@endcond + + protected: + //@cond + template + static node_type * alloc_node(const K& key) + { + return 
cxx_allocator().New( key ); + } + + template + static node_type * alloc_node( const K& key, const V& val ) + { + return cxx_allocator().New( key, val ); + } + +#ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward(args)... ); + } +#endif + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + head_type& head() + { + return *base_class::head(); + } + + head_type const& head() const + { + return *base_class::head(); + } + + head_type& tail() + { + return *base_class::tail(); + } + + head_type const& tail() const + { + return *base_class::tail(); + } + + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( const_cast(&pNode) ) + {} + iterator_type( head_type const * pNode ) + : iterator_base( const_cast(pNode) ) + {} + + friend class LazyKVList; + + public: + typedef typename cds::details::make_const_type::reference value_ref; + typedef typename cds::details::make_const_type::pointer value_ptr; + + typedef typename cds::details::make_const_type::reference pair_ref; + typedef typename cds::details::make_const_type::pointer pair_ptr; + + iterator_type() + {} + + iterator_type( iterator_type const& src ) + : iterator_base( src ) + {} + + key_type const& key() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != null_ptr() ); + return p->m_Data.first; + } + + value_ref val() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != null_ptr() ); + return p->m_Data.second; + } + + pair_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? &(p->m_Data) : null_ptr(); + } + + pair_ref operator *() const + { + typename iterator_base::value_ref p = iterator_base::operator *(); + return p.m_Data; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + /// Forward iterator + /** + The forward iterator for lazy list has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. + For some GC (gc::HP, gc::HRC), a guard is limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if a limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary since it contains GC's guard that is thread-private GC data. + - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the list. + + Therefore, the use of iterators in concurrent environment is not good idea. Use the iterator on the concurrent container + for debug purpose only. 
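+
+            A debug-only traversal sketch (illustrative; assumes \p list_type is a \p %LazyKVList< cds::gc::HP, int, std::string, traits > typedef and \p theList an instance of it), using the accessors described below:
+            \code
+            for ( list_type::iterator it = theList.begin(); it != theList.end(); ++it )
+                std::cout << it.key() << " => " << it.val() << std::endl;
+            \endcode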
+ + The iterator interface to access item data: + - operator -> - returns a pointer to \ref value_type for iterator + - operator * - returns a reference (a const reference for \p const_iterator) to \ref value_type for iterator + - const key_type& key() - returns a key reference for iterator + - mapped_type& val() - retuns a value reference for iterator (const reference for \p const_iterator) + + For both functions the iterator should not be equal to end() + */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + iterator it( head() ); + ++it ; // skip dummy head + return it; + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to NULL. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( tail() ); + } + + /// Returns a forward const iterator addressing the first element in a list + //@{ + const_iterator begin() const + { + const_iterator it( head() ); + ++it; // skip dummy head + return it; + } + const_iterator cbegin() + { + const_iterator it( head() ); + ++it; // skip dummy head + return it; + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a list + //@{ + const_iterator end() const + { + return const_iterator( tail()); + } + const_iterator cend() + { + return const_iterator( tail()); + } + //@} + + public: + /// Default constructor + /** + Initializes empty list + */ + LazyKVList() + {} + + /// List destructor + /** + Clears the list + */ + ~LazyKVList() + { + clear(); + } + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( const K& key ) + { + return insert_at( head(), key ); + } + + /// Inserts new node with a key and a value + /** + The function creates a node with \p key and value \p val, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. + + Returns \p true if inserting successful, \p false otherwise. 
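+
+            For example (illustrative sketch; \p theList is assumed to be a \p %LazyKVList< cds::gc::HP, int, std::string, traits > instance):
+            \code
+            theList.insert( 1 );                       // key 1 with a default-constructed value
+            theList.insert( 2, std::string("two") );   // key 2 with an explicit value
+            \endcode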
+ */ + template + bool insert( const K& key, const V& val ) + { + // We cannot use insert with functor here + // because we cannot lock inserted node for updating + // Therefore, we use separate function + return insert_at( head(), key, val ); + } + + /// Inserts new node and initializes it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the list's item inserted. item.second is a reference to item's value that may be changed. + User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this list's item by concurrent threads. + The user-defined functor can be passed by reference using boost::ref + and it is called only if inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the list; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + */ + template + bool insert_key( const K& key, Func func ) + { + return insert_key_at( head(), key, func ); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref mapped_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + @note This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + return emplace_at( head(), std::forward(args)... ); + } +# endif + + /// Ensures that the \p key exists in the list + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p key not found in the list, then the new item created from \p key + is inserted into the list (note that in this case the \ref key_type should be + copy-constructible from type \p K). + Otherwise, the functor \p func is called with item found. + The functor \p Func may be a function with signature: + \code + void func( bool bNew, value_type& item ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + + The functor may change any fields of the \p item.second that is \ref mapped_type; + however, \p func must guarantee that during changing no any other modifications + could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. 
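+
+            For example (illustrative sketch; \p int_list is assumed to be a \p %LazyKVList< cds::gc::HP, int, int, traits > typedef and \p theList an instance of it):
+            \code
+            struct ensure_functor {
+                void operator()( bool bNew, int_list::value_type& item )
+                {
+                    if ( bNew )
+                        item.second = 1;    // initialize the value of a newly inserted key
+                    else
+                        item.second += 1;   // bump the value of an existing key
+                }
+            };
+
+            std::pair<bool, bool> res = theList.ensure( 5, ensure_functor() );
+            // res.first  - the operation succeeded
+            // res.second - true if key 5 has been inserted, false if it already existed and was updated
+            \endcode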
+ */ + template + std::pair ensure( const K& key, Func f ) + { + return ensure_at( head(), key, f ); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_LazyKVList_hp_erase_val + + Returns \p true if \p key is found and has been deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return erase_at( head(), key, intrusive_key_comparator() ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_hp_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( K const& key, Less pred ) + { + return erase_at( head(), key, typename options::template less_wrapper::type() ); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_LazyKVList_hp_erase_func + The function searches an item with key \p key, calls \p f functor with item found + and deletes it. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& val) { ... } + }; + \endcode + The functor may be passed by reference with boost:ref + + Returns \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + return erase_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_hp_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + return erase_at( head(), key, typename options::template less_wrapper::type(), f ); + } + + /// Extracts the item from the list with specified \p key + /** \anchor cds_nonintrusive_LazyKVList_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the list, and returns it in \p dest parameter. + If the item with key equal to \p key is not found the function returns \p false. + + Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type. + + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::LazyKVList< cds::gc::HP, int, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp; + theList.extract( gp, 5 ); + // Deal with gp + // ... + + // Destructor of gp releases internal HP guard and frees the item + } + \endcode + */ + template + bool extract( guarded_ptr& dest, K const& key ) + { + return extract_at( head(), dest.guard(), key, intrusive_key_comparator() ); + } + + /// Extracts the item from the list with comparing functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_hp_extract "extract(guarded_ptr&, K const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the list. 
+ */ + template + bool extract_with( guarded_ptr& dest, K const& key, Less pred ) + { + return extract_at( head(), dest.guard(), key, typename options::template less_wrapper::type() ); + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_LazyKVList_hp_find_val + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise + */ + template + bool find( Q const& key ) + { + return find_at( head(), key, intrusive_key_comparator() ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_hp_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& key, Less pred ) + { + return find_at( head(), key, typename options::template less_wrapper::type() ); + } + + /// Finds the key \p key and performs an action with it + /** \anchor cds_nonintrusive_LazyKVList_hp_find_func + The function searches an item with key equal to \p key and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change item.second that is reference to value of node. + Note that the function is only guarantee that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q const& key, Func f ) + { + return find_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_hp_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& key, Less pred, Func f ) + { + return find_at( head(), key, typename options::template less_wrapper::type(), f ); + } + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_LazyKVList_hp_get + The function searches the item with key equal to \p key + and assigns the item found to guarded pointer \p ptr. + The function returns \p true if \p key is found, and \p false otherwise. + If \p key is not found the \p ptr parameter is not changed. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::LazyKVList< cds::gc::HP, int, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp; + if ( theList.get( gp, 5 )) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard and frees the item + } + \endcode + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p K that can be not the same as \p key_type. 
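+        For example, such a comparator might look as follows (a sketch assuming \p key_type is
+        std::string and lookups are performed with plain C strings):
+        \code
+        struct my_compare {
+            int operator()( std::string const& lhs, std::string const& rhs ) const
+            { return lhs.compare( rhs ); }
+            int operator()( std::string const& lhs, char const* rhs ) const
+            { return lhs.compare( rhs ); }
+            int operator()( char const* lhs, std::string const& rhs ) const
+            { return -rhs.compare( lhs ); }
+        };
+        \endcode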
+ */ + template + bool get( guarded_ptr& ptr, K const& key ) + { + return get_at( head(), ptr.guard(), key, intrusive_key_comparator() ); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_hp_get "get(guarded_ptr& ptr, K const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool get_with( guarded_ptr& ptr, K const& key, Less pred ) + { + return get_at( head(), ptr.guard(), key, typename options::template less_wrapper::type() ); + } + + /// Checks if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Clears the list + /** + Post-condition: the list is empty + */ + void clear() + { + base_class::clear(); + } + + protected: + //@cond + bool insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != null_ptr() ); + scoped_node_ptr p( pNode ); + + if ( base_class::insert_at( &refHead, *p )) { + p.release(); + return true; + } + + return false; + } + + template + bool insert_at( head_type& refHead, const K& key ) + { + return insert_node_at( refHead, alloc_node( key )); + } + + template + bool insert_at( head_type& refHead, const K& key, const V& val ) + { + return insert_node_at( refHead, alloc_node( key, val )); + } + + template + bool insert_key_at( head_type& refHead, const K& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( base_class::insert_at( &refHead, *pNode, [&f](node_type& node){ cds::unref(f)( node.m_Data ); } )) +# else + insert_functor wrapper( f ); + if ( base_class::insert_at( &refHead, *pNode, cds::ref(wrapper) )) +# endif + { + pNode.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace_at( head_type& refHead, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(args)... 
)); + } +# endif + + template + bool erase_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::erase_at( &refHead, key, cmp ); + } + + template + bool erase_at( head_type& refHead, K const& key, Compare cmp, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase_at( &refHead, key, cmp, [&f](node_type const & node){cds::unref(f)( const_cast(node.m_Data)); }); +# else + erase_functor wrapper( f ); + return base_class::erase_at( &refHead, key, cmp, cds::ref(wrapper) ); +# endif + } + + template + bool extract_at( head_type& refHead, typename gc::Guard& dest, K const& key, Compare cmp ) + { + return base_class::extract_at( &refHead, dest, key, cmp ); + } + + template + std::pair ensure_at( head_type& refHead, const K& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair ret = base_class::ensure_at( &refHead, *pNode, + [&f]( bool bNew, node_type& node, node_type& ){ cds::unref(f)( bNew, node.m_Data ); }); +# else + ensure_functor wrapper( f ); + std::pair ret = base_class::ensure_at( &refHead, *pNode, cds::ref(wrapper)); +# endif + if ( ret.first && ret.second ) + pNode.release(); + + return ret; + } + + template + bool find_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::find_at( &refHead, key, cmp ); + } + + template + bool find_at( head_type& refHead, K& key, Compare cmp, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_at( &refHead, key, cmp, [&f]( node_type& node, K& ){ cds::unref(f)( node.m_Data ); }); +# else + find_functor wrapper( f ); + return base_class::find_at( &refHead, key, cmp, cds::ref(wrapper) ); +# endif + } + + template + bool get_at( head_type& refHead, typename gc::Guard& guard, K const& key, Compare cmp ) + { + return base_class::get_at( &refHead, guard, key, cmp ); + } + + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_LAZY_KVLIST_IMPL_H diff --git a/cds/container/lazy_kvlist_nogc.h b/cds/container/lazy_kvlist_nogc.h new file mode 100644 index 00000000..73f572d2 --- /dev/null +++ b/cds/container/lazy_kvlist_nogc.h @@ -0,0 +1,621 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_LAZY_KVLIST_NOGC_H +#define __CDS_CONTAINER_LAZY_KVLIST_NOGC_H + +#include +#include +#include +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + + template + struct make_lazy_kvlist_nogc: public make_lazy_kvlist + { + typedef make_lazy_kvlist base_maker; + typedef typename base_maker::node_type node_type; + + struct type_traits: public base_maker::type_traits + { + typedef typename base_maker::node_deallocator disposer; + }; + + typedef intrusive::LazyList type; + }; + + } // namespace details + //@endcond + + /// Lazy ordered list (key-value pair, template specialization for gc::nogc) + /** @ingroup cds_nonintrusive_list + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of list item. + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + See \ref cds_nonintrusive_LazyList_gc "LazyList" for description of template parameters. + + The interface of the specialization is a little different. 
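+        A brief declaration and usage sketch (the traits shown are illustrative only; note that
+        \p insert returns an iterator rather than \p bool):
+        \code
+        #include <cds/container/lazy_kvlist_nogc.h>
+
+        typedef cds::container::LazyKVList< cds::gc::nogc, int, std::string,
+            cds::container::lazy_list::make_traits<
+                cds::container::opt::less< std::less<int> >
+            >::type
+        > persistent_list;
+
+        persistent_list theList;
+        persistent_list::iterator it = theList.insert( 10, "ten" );
+        if ( it != theList.end() ) {
+            // the pair (10, "ten") has been inserted
+        }
+        // items cannot be erased; they live until clear() or the list destructor
+        \endcode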
+ */ + template < + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = lazy_list::type_traits +#else + typename Traits +#endif + > + class LazyKVList: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::LazyList< gc::nogc, implementation_defined, Traits > +#else + protected details::make_lazy_kvlist_nogc< Key, Value, Traits >::type +#endif + { + //@cond + typedef details::make_lazy_kvlist_nogc< Key, Value, Traits > options; + typedef typename options::type base_class; + //@endcond + + public: +#ifdef CDS_DOXYGEN_INVOKED + typedef Key key_type ; ///< Key type + typedef Value mapped_type ; ///< Type of value stored in the list + typedef std::pair value_type ; ///< key/value pair stored in the list +#else + typedef typename options::key_type key_type; + typedef typename options::value_type mapped_type; + typedef typename options::pair_type value_type; +#endif + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename options::key_comparator key_comparator ; ///< key comparison functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename options::cxx_allocator cxx_allocator; + typedef typename options::node_deallocator node_deallocator; + typedef typename options::type_traits::compare intrusive_key_comparator; + + typedef typename base_class::node_type head_type; + //@endcond + + private: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct ensure_functor + { + node_type * m_pItemFound; + + ensure_functor() + : m_pItemFound( null_ptr() ) + {} + + void operator ()(bool, node_type& item, node_type& ) + { + m_pItemFound = &item; + } + }; + + template + class find_functor: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + find_functor( Func f ) + : base_class(f) + {} + + template + void operator ()( node_type& node, Q& ) + { + base_class::get()( node.m_Data ); + } + }; +# endif // ifndef CDS_CXX11_LAMBDA_SUPPORT + //@endcond + + protected: + //@cond + template + static node_type * alloc_node(const K& key) + { + return cxx_allocator().New( key ); + } + + template + static node_type * alloc_node( const K& key, const V& val ) + { + return cxx_allocator().New( key, val ); + } + +#ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward(args)... 
); + } +#endif + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + head_type& head() + { + return base_class::m_Head; + } + + head_type const& head() const + { + return base_class::m_Head; + } + + head_type& tail() + { + return base_class::m_Tail; + } + + head_type const& tail() const + { + return base_class::m_Tail; + } + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& refNode ) + : iterator_base( const_cast( &refNode )) + {} + + explicit iterator_type( const iterator_base& it ) + : iterator_base( it ) + {} + + friend class LazyKVList; + + protected: + explicit iterator_type( node_type& pNode ) + : iterator_base( &pNode ) + {} + + public: + typedef typename cds::details::make_const_type::reference value_ref; + typedef typename cds::details::make_const_type::pointer value_ptr; + + typedef typename cds::details::make_const_type::reference pair_ref; + typedef typename cds::details::make_const_type::pointer pair_ptr; + + iterator_type() + : iterator_base() + {} + + iterator_type( const iterator_type& src ) + : iterator_base( src ) + {} + + key_type const& key() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != null_ptr() ); + return p->m_Data.first; + } + + value_ref val() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != null_ptr() ); + return p->m_Data.second; + } + + pair_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? &(p->m_Data) : null_ptr(); + } + + pair_ref operator *() const + { + typename iterator_base::value_ref p = iterator_base::operator *(); + return p.m_Data; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + /// Post-increment + iterator_type operator ++(int) + { + return iterator_base::operator ++(0); + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + /// Forward iterator + /** + The forward iterator for lazy list based on gc::nogc has pre- and post-increment operators. 
+ + The iterator interface to access item data: + - operator -> - returns a pointer to \ref value_type for iterator + - operator * - returns a reference (a const reference for \p const_iterator) to \ref value_type for iterator + - const key_type& key() - returns a key reference for iterator + - mapped_type& val() - retuns a value reference for iterator (const reference for \p const_iterator) + + For both functions the iterator should not be equal to end() + */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + iterator it( head() ); + ++it ; // skip dummy head + return it; + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to NULL. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( tail()); + } + + /// Returns a forward const iterator addressing the first element in a list + //@{ + const_iterator begin() const + { + const_iterator it( head() ); + ++it ; // skip dummy head + return it; + } + const_iterator cbegin() + { + const_iterator it( head() ); + ++it ; // skip dummy head + return it; + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a list + //@{ + const_iterator end() const + { + return const_iterator( tail()); + } + const_iterator cend() + { + return const_iterator( tail()); + } + //@} + + protected: + //@cond + iterator node_to_iterator( node_type * pNode ) + { + if ( pNode ) + return iterator( *pNode ); + return end(); + } + //@endcond + + public: + /// Default constructor + /** + Initialize empty list + */ + LazyKVList() + {} + + /// List desctructor + /** + Clears the list + */ + ~LazyKVList() + { + clear(); + } + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( const K& key ) + { + return node_to_iterator( insert_at( head(), key )); + } + + /// Inserts new node with a key and a value + /** + The function creates a node with \p key and value \p val, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. 
+ + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( const K& key, const V& val ) + { + // We cannot use insert with functor here + // because we cannot lock inserted node for updating + // Therefore, we use separate function + return node_to_iterator( insert_at( head(), key, val )); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code void func( value_type& item ) ; endcode + or + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the list's item inserted. item.second is a reference to item's value that may be changed. + User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this list's item by concurrent threads. + The user-defined functor can be passed by reference using boost::ref + and it is called only if the inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the list; + - if inserting is successful, initialize the value of item by calling \p f functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert_key( const K& key, Func func ) + { + return node_to_iterator( insert_key_at( head(), key, func )); + } + + /// Ensures that the key \p key exists in the list + /** + The operation inserts new item if the key \p key is not found in the list. + Otherwise, the function returns an iterator that points to item found. + + Returns std::pair where \p first is an iterator pointing to + item found or inserted, \p second is true if new item has been added or \p false if the item + already is in the list. + */ + template + std::pair ensure( const K& key ) + { + std::pair< node_type *, bool > ret = ensure_at( head(), key ); + return std::make_pair( node_to_iterator( ret.first ), ret.second ); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref mapped_type constructed with std::forward(args)... + /** + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + iterator emplace( Args&&... args ) + { + return node_to_iterator( emplace_at( head(), std::forward(args)... )); + } +# endif + + /// Find the key \p key + /** \anchor cds_nonintrusive_LazyKVList_nogc_find + The function searches the item with key equal to \p key + and returns an iterator pointed to item found if the key is found, + and \ref end() otherwise + */ + template + iterator find( Q const& key ) + { + return node_to_iterator( find_at( head(), key, intrusive_key_comparator() ) ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_nogc_find "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. 
+ \p pred must imply the same element order as the comparator used for building the list. + */ + template + iterator find_with( Q const& key, Less pred ) + { + return node_to_iterator( find_at( head(), key, typename options::template less_wrapper::type() ) ); + } + + /// Check if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Clears the list + /** + Post-condition: the list is empty + */ + void clear() + { + base_class::clear(); + } + + protected: + //@cond + node_type * insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != null_ptr() ); + scoped_node_ptr p( pNode ); + if ( base_class::insert_at( &refHead, *p )) + return p.release(); + + return null_ptr(); + } + + template + node_type * insert_at( head_type& refHead, const K& key ) + { + return insert_node_at( refHead, alloc_node( key )); + } + + template + node_type * insert_at( head_type& refHead, const K& key, const V& val ) + { + return insert_node_at( refHead, alloc_node( key, val )); + } + + template + node_type * insert_key_at( head_type& refHead, const K& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + + if ( base_class::insert_at( &refHead, *pNode )) { + cds::unref(f)( pNode->m_Data ); + return pNode.release(); + } + + return null_ptr(); + } + + + template + std::pair< node_type *, bool > ensure_at( head_type& refHead, const K& key ) + { + scoped_node_ptr pNode( alloc_node( key )); + node_type * pItemFound = null_ptr(); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair ret = base_class::ensure_at( &refHead, *pNode, [&pItemFound](bool, node_type& item, node_type&){ pItemFound = &item; } ); +# else + ensure_functor func; + std::pair ret = base_class::ensure_at( &refHead, *pNode, boost::ref(func) ); + pItemFound = func.m_pItemFound; +# endif + if ( ret.first && ret.second ) + pNode.release(); + + assert( pItemFound != null_ptr() ); + return std::make_pair( pItemFound, ret.second ); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + node_type * emplace_at( head_type& refHead, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(args)... 
)); + } +#endif + + template + node_type * find_at( head_type& refHead, const K& key, Compare cmp ) + { + return base_class::find_at( &refHead, key, cmp ); + } + + /* + template + bool find_at( head_type& refHead, K& key, Compare cmp, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_at( &refHead, key, cmp, [&f]( node_type& node, K const& ){ cds::unref(f)( node.m_Data ); }); +# else + find_functor wrapper( f ); + return base_class::find_at( &refHead, key, cmp, cds::ref(wrapper) ); +# endif + } + */ + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_LAZY_KVLIST_NOGC_H diff --git a/cds/container/lazy_kvlist_ptb.h b/cds/container/lazy_kvlist_ptb.h new file mode 100644 index 00000000..cdfd1c19 --- /dev/null +++ b/cds/container/lazy_kvlist_ptb.h @@ -0,0 +1,11 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_LAZY_KVLIST_PTB_H +#define __CDS_CONTAINER_LAZY_KVLIST_PTB_H + +#include +#include +#include +#include + +#endif // #ifndef __CDS_CONTAINER_LAZY_KVLIST_PTB_H diff --git a/cds/container/lazy_kvlist_rcu.h b/cds/container/lazy_kvlist_rcu.h new file mode 100644 index 00000000..bbb592f9 --- /dev/null +++ b/cds/container/lazy_kvlist_rcu.h @@ -0,0 +1,947 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_LAZY_KVLIST_RCU_H +#define __CDS_CONTAINER_LAZY_KVLIST_RCU_H + +#include +#include +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Lazy ordered list (key-value pair), template specialization for \ref cds_urcu_desc "RCU" + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_LazyKVList_rcu + + This is key-value variation of non-intrusive LazyList. + Like standard container, this implementation split a value stored into two part - + constant key and alterable value. + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p Key - key type of an item stored in the list. It should be copy-constructible + - \p Value - value type stored in the list + - \p Traits - type traits, default is lazy_list::type_traits + + It is possible to declare option-based list with cds::container::lazy_list::make_traits metafunction istead of \p Traits template + argument. For example, the following traits-based declaration of gc::HP lazy list + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. 
+ \code + #include + #include + // Declare comparator for the item + struct my_compare { + int operator ()( int i1, int i2 ) + { + return i1 - i2; + } + }; + + // Declare type_traits + struct my_traits: public cds::container::lazy_list::type_traits + { + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::container::LazyKVList< cds::urcu::gc< cds::urcu::general_threaded<> >, int, int, my_traits > traits_based_list; + \endcode + + is equivalent for the following option-based list + \code + #include + #include + + // my_compare is the same + + // Declare option-based list + typedef cds::container::LazyKVList< cds::urcu::gc< cds::urcu::general_threaded<> >, int, int, + typename cds::container::lazy_list::make_traits< + cds::container::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + Template argument list \p Options of cds::container::lazy_list::make_traits metafunction are: + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::allocator - the allocator used for creating and freeing list's item. Default is \ref CDS_DEFAULT_ALLOCATOR macro. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - opt::rcu_check_deadlock - a deadlock checking policy. Default is opt::v::rcu_throw_deadlock + */ + template < + typename RCU, + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = lazy_list::type_traits +#else + typename Traits +#endif + > + class LazyKVList< cds::urcu::gc, Key, Value, Traits >: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::LazyList< cds::urcu::gc, implementation_defined, Traits > +#else + protected details::make_lazy_kvlist< cds::urcu::gc, Key, Value, Traits >::type +#endif + { + //@cond + typedef details::make_lazy_kvlist< cds::urcu::gc, Key, Value, Traits > options; + typedef typename options::type base_class; + //@endcond + + public: +#ifdef CDS_DOXYGEN_INVOKED + typedef Key key_type ; ///< Key type + typedef Value mapped_type ; ///< Type of value stored in the list + typedef std::pair value_type ; ///< key/value pair stored in the list +#else + typedef typename options::key_type key_type; + typedef typename options::value_type mapped_type; + typedef typename options::pair_type value_type; +#endif + + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename options::key_comparator key_comparator ; ///< key comparison functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename base_class::rcu_check_deadlock rcu_check_deadlock ; ///< RCU deadlock checking policy + + typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock + static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions require external locking + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename options::cxx_allocator cxx_allocator; + typedef typename options::node_deallocator node_deallocator; + typedef typename options::type_traits::compare intrusive_key_comparator; + + typedef typename base_class::node_type head_type; + //@endcond + + public: + /// pointer to extracted node + typedef cds::urcu::exempt_ptr< gc, node_type, value_type, typename options::type_traits::disposer, + cds::urcu::details::conventional_exempt_pair_cast + > exempt_ptr; + + private: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + template + class insert_functor: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + insert_functor ( Func f ) + : base_class( f ) + {} + + void operator()( node_type& node ) + { + base_class::get()( node.m_Data ); + } + }; + + template + class ensure_functor: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + ensure_functor( Func f ) + : base_class(f) + {} + + void operator ()( bool bNew, node_type& node, node_type& ) + { + base_class::get()( bNew, node.m_Data ); + } + }; + + template + class find_functor: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + find_functor( Func f ) + : base_class(f) + {} + + template + void operator ()( node_type& node, Q& ) + { + base_class::get()( node.m_Data ); + } + }; + + struct empty_find_functor + { + template + void operator ()( node_type& node, Q& val ) const + {} + }; + + template + struct erase_functor + { + Func m_func; + + erase_functor( Func f ) + : m_func( f ) + {} + + void operator ()( node_type const & node ) + { + cds::unref(m_func)( const_cast(node.m_Data) ); + } + }; +# endif // ifndef CDS_CXX11_LAMBDA_SUPPORT + //@endcond + + protected: + //@cond + template + static node_type * alloc_node(const K& key) + { + return cxx_allocator().New( key ); + } + + template + static node_type * alloc_node( const K& key, const V& val ) + { + return cxx_allocator().New( key, val ); + } + +#ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward(args)... 
); + } +#endif + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + head_type& head() + { + return base_class::m_Head; + } + + head_type& head() const + { + return const_cast( base_class::m_Head ); + } + + head_type& tail() + { + return base_class::m_Tail; + } + + head_type& tail() const + { + return const_cast( base_class::m_Tail ); + } + + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( const_cast(&pNode) ) + {} + iterator_type( head_type const * pNode ) + : iterator_base( const_cast(pNode) ) + {} + + friend class LazyKVList; + + public: + typedef typename cds::details::make_const_type::reference value_ref; + typedef typename cds::details::make_const_type::pointer value_ptr; + + typedef typename cds::details::make_const_type::reference pair_ref; + typedef typename cds::details::make_const_type::pointer pair_ptr; + + iterator_type() + {} + + iterator_type( iterator_type const& src ) + : iterator_base( src ) + {} + + key_type const& key() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != null_ptr() ); + return p->m_Data.first; + } + + value_ref val() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != null_ptr() ); + return p->m_Data.second; + } + + pair_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? &(p->m_Data) : null_ptr(); + } + + pair_ref operator *() const + { + typename iterator_base::value_ref p = iterator_base::operator *(); + return p.m_Data; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + /// Forward iterator + typedef iterator_type iterator; + + /// Const forward iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + iterator it( head() ); + ++it ; // skip dummy head + return it; + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value pointing to dummy tail node. + + The returned value can be used only to control reaching the end of the list. 
+ For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( tail() ); + } + + /// Returns a forward const iterator addressing the first element in a list + //@{ + const_iterator begin() const + { + const_iterator it( head() ); + ++it; // skip dummy head + return it; + } + const_iterator cbegin() + { + const_iterator it( head() ); + ++it; // skip dummy head + return it; + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a list + //@{ + const_iterator end() const + { + return const_iterator( tail()); + } + const_iterator cend() + { + return const_iterator( tail()); + } + //@} + + public: + /// Default constructor + /** + Initializes empty list + */ + LazyKVList() + {} + + /// List destructor + /** + Clears the list + */ + ~LazyKVList() + { + clear(); + } + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + The function makes RCU lock internally. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( const K& key ) + { + return insert_at( head(), key ); + } + + /// Inserts new node with a key and a value + /** + The function creates a node with \p key and value \p val, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. + + The function makes RCU lock internally. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( const K& key, const V& val ) + { + return insert_at( head(), key, val ); + } + + /// Inserts new node and initializes it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the list's item inserted. item.second is a reference to item's value that may be changed. + User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this list's item by concurrent threads. + The user-defined functor can be passed by reference using boost::ref + and it is called only if inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the list; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + The function makes RCU lock internally. + */ + template + bool insert_key( const K& key, Func func ) + { + return insert_key_at( head(), key, func ); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref mapped_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + The function makes RCU lock internally. 
+ + @note This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + return emplace_at( head(), std::forward(args)... ); + } +# endif + + /// Ensures that the \p key exists in the list + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p key not found in the list, then the new item created from \p key + is inserted into the list (note that in this case the \ref key_type should be + copy-constructible from type \p K). + Otherwise, the functor \p func is called with item found. + The functor \p Func may be a function with signature: + \code + void func( bool bNew, value_type& item ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + + The functor may change any fields of the \p item.second that is \ref mapped_type; + however, \p func must guarantee that during changing no any other modifications + could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + The function makes RCU lock internally. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair ensure( const K& key, Func f ) + { + return ensure_at( head(), key, f ); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_LazyKVList_rcu_erase + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns \p true if \p key is found and has been deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return erase_at( head(), key, intrusive_key_comparator() ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_rcu_erase "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( K const& key, Less pred ) + { + return erase_at( head(), key, typename options::template less_wrapper::type() ); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_LazyKVList_rcu_erase_func + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& val) { ... } + }; + \endcode + The functor may be passed by reference with boost:ref + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + return erase_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_rcu_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. 
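+        A brief sketch (the \p rcu_list typedef, the \p foo type and the functor are assumptions,
+        and \p my_traits is assumed to order keys with \p std::less; like
+        \ref cds_nonintrusive_LazyKVList_rcu_erase_func "erase(K const&, Func)", the function may call
+        RCU \p synchronize, so it must be called outside of an \p rcu_lock section):
+        \code
+        typedef cds::container::LazyKVList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, foo, my_traits > rcu_list;
+
+        struct value_cleanup {
+            void operator()( rcu_list::value_type& val )
+            {
+                // inspect or clean up val.second before the item is removed from the list
+            }
+        };
+
+        rcu_list theList;
+        // ...
+        // must be called outside of any rcu_lock section
+        bool bErased = theList.erase_with( 10, std::less<int>(), value_cleanup() );
+        \endcode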
+ */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + return erase_at( head(), key, typename options::template less_wrapper::type(), f ); + } + + /// Extracts an item from the list + /** + @anchor cds_nonintrusive_LazyKVList_rcu_extract + The function searches an item with key equal to \p key in the list, + unlinks it from the list, and returns pointer to an item found in \p dest argument. + If \p key is not found the function returns \p false. + + @note The function does NOT call RCU read-side lock or synchronization, + and does NOT dispose the item found. It just excludes the item from the list + and returns a pointer to item found. + You should lock RCU before calling this function. + + \code + #include + #include + + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::container::LazyKVList< rcu, int, Foo > rcu_lazy_list; + + rcu_lazy_list theList; + // ... + + rcu_lazy_list::exempt_ptr p; + { + // first, we should lock RCU + rcu_lazy_list::rcu_lock sl; + + // Now, you can apply extract function + // Note that you must not delete the item found inside the RCU lock + if ( theList.extract( p, 10 )) { + // do something with p + ... + } + } + // Outside RCU lock section we may safely release extracted pointer. + // release() passes the pointer to RCU reclamation cycle. + p.release(); + \endcode + */ + template + bool extract( exempt_ptr& dest, K const& key ) + { + dest = extract_at( head(), key, intrusive_key_comparator() ); + return !dest.empty(); + } + + /// Extracts an item from the list using \p pred predicate for searching + /** + This function is the analog for \ref cds_nonintrusive_LazyKVList_rcu_extract "extract(exempt_ptr&, K const&)". + The \p pred is a predicate used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as \ref key_comparator. + */ + template + bool extract_with( exempt_ptr& dest, K const& key, Less pred ) + { + dest = extract_at( head(), key, typename options::template less_wrapper::type() ); + return !dest.empty(); + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_LazyKVList_rcu_find_val + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise + + The function applies RCU lock internally. + */ + template + bool find( Q const& key ) const + { + return find_at( head(), key, intrusive_key_comparator() ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_rcu_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& key, Less pred ) const + { + return find_at( head(), key, typename options::template less_wrapper::type() ); + } + + /// Finds the key \p key and performs an action with it + /** \anchor cds_nonintrusive_LazyKVList_rcu_find_func + The function searches an item with key equal to \p key and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change item.second that is reference to value of node. 
+ Note that the function is only guarantee that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function applies RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q const& key, Func f ) const + { + return find_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_rcu_find_func "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& key, Less pred, Func f ) const + { + return find_at( head(), key, typename options::template less_wrapper::type(), f ); + } + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_LazyKVList_rcu_get + The function searches the item with \p key and returns the pointer to item found. + If \p key is not found it returns \p NULL. + + Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::container::LazyKVList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, foo, my_traits > ord_list; + ord_list theList; + // ... + { + // Lock RCU + ord_list::rcu_lock lock; + + ord_list::value_type * pVal = theList.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + // Unlock RCU by rcu_lock destructor + // pVal can be freed at any time after RCU has been unlocked + } + \endcode + */ + template + value_type * get( K const& key ) const + { + return get_at( head(), key, intrusive_key_comparator()); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_rcu_get "get(K const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + value_type * get_with( K const& key, Less pred ) const + { + return get_at( head(), key, typename options::template less_wrapper::type()); + } + + /// Checks if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. 
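+        For example, a real item counter can be selected with the \p opt::item_counter option
+        (a sketch; the RCU type, the \p foo type and the other options are illustrative only):
+        \code
+        typedef cds::container::LazyKVList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, foo,
+            cds::container::lazy_list::make_traits<
+                cds::container::opt::less< std::less<int> >
+                ,cds::container::opt::item_counter< cds::atomicity::item_counter >
+            >::type
+        > counted_list;
+
+        counted_list theList;
+        // ...
+        // size() now returns the actual number of items in the list
+        size_t n = theList.size();
+        \endcode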
+ */ + size_t size() const + { + return base_class::size(); + } + + /// Clears the list + /** + Post-condition: the list is empty + */ + void clear() + { + base_class::clear(); + } + + protected: + //@cond + bool insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != null_ptr() ); + scoped_node_ptr p( pNode ); + + if ( base_class::insert_at( &refHead, *p )) { + p.release(); + return true; + } + + return false; + } + + template + bool insert_at( head_type& refHead, const K& key ) + { + return insert_node_at( refHead, alloc_node( key )); + } + + template + bool insert_at( head_type& refHead, const K& key, const V& val ) + { + return insert_node_at( refHead, alloc_node( key, val )); + } + + template + bool insert_key_at( head_type& refHead, const K& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( base_class::insert_at( &refHead, *pNode, [&f](node_type& node){ cds::unref(f)( node.m_Data ); } )) +# else + insert_functor wrapper( f ); + if ( base_class::insert_at( &refHead, *pNode, cds::ref(wrapper) )) +# endif + { + pNode.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace_at( head_type& refHead, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(args)... )); + } +# endif + + template + bool erase_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::erase_at( &refHead, key, cmp ); + } + + template + bool erase_at( head_type& refHead, K const & key, Compare cmp, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase_at( &refHead, key, cmp, [&f](node_type const & node){cds::unref(f)( const_cast(node.m_Data)); }); +# else + erase_functor wrapper( f ); + return base_class::erase_at( &refHead, key, cmp, cds::ref(wrapper) ); +# endif + } + + template + std::pair ensure_at( head_type& refHead, const K& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair ret = base_class::ensure_at( &refHead, *pNode, + [&f]( bool bNew, node_type& node, node_type& ){ cds::unref(f)( bNew, node.m_Data ); }); +# else + ensure_functor wrapper( f ); + std::pair ret = base_class::ensure_at( &refHead, *pNode, cds::ref(wrapper)); +# endif + if ( ret.first && ret.second ) + pNode.release(); + + return ret; + } + + template + node_type * extract_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::extract_at( &refHead, key, cmp ); + } + + template + bool find_at( head_type& refHead, K const& key, Compare cmp ) const + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_at( &refHead, key, cmp, [](node_type&, K const&) {} ); +# else + return base_class::find_at( &refHead, key, cmp, empty_find_functor() ); +# endif + } + + template + bool find_at( head_type& refHead, K& key, Compare cmp, Func f ) const + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_at( &refHead, key, cmp, [&f]( node_type& node, K& ){ cds::unref(f)( node.m_Data ); }); +# else + find_functor wrapper( f ); + return base_class::find_at( &refHead, key, cmp, cds::ref(wrapper) ); +# endif + } + + template + value_type * get_at( head_type& refHead, K const& val, Compare cmp ) const + { + node_type * pNode = base_class::get_at( &refHead, val, cmp ); + return pNode ? 
&pNode->m_Data : null_ptr(); + } + + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_LAZY_KVLIST_RCU_H diff --git a/cds/container/lazy_list_base.h b/cds/container/lazy_list_base.h new file mode 100644 index 00000000..099c9074 --- /dev/null +++ b/cds/container/lazy_list_base.h @@ -0,0 +1,127 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_LAZY_LIST_BASE_H +#define __CDS_CONTAINER_LAZY_LIST_BASE_H + +#include +#include +#include + +namespace cds { namespace container { + + /// LazyList ordered list related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace lazy_list { + /// Lazy list default type traits + /** + Either \p compare or \p less or both must be specified. + */ + struct type_traits + { + /// allocator used to allocate new node + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// specifies binary predicate used for key comparison. + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// Lock type used to lock modifying items + /** + Default is cds::lock::Spin + */ + typedef cds::lock::Spin lock_type; + + /// back-off strategy used + /** + If the option is not specified, the cds::backoff::Default is used. + */ + typedef cds::backoff::Default back_off; + + /// Item counter + /** + The type for item counting feature. + Default is no item counter (\ref atomicity::empty_item_counter) + */ + typedef atomicity::empty_item_counter item_counter; + + /// Link fields checking feature + /** + Default is \ref intrusive::opt::debug_check_link + */ + static const opt::link_check_type link_checker = opt::debug_check_link; + + /// C++ memory ordering model + /** + List of available memory ordering see opt::memory_model + */ + typedef opt::v::relaxed_ordering memory_model; + + /// RCU deadlock checking policy (only for \ref cds_intrusive_LazyList_rcu "RCU-based LazyList") + /** + List of available options see opt::rcu_check_deadlock + */ + typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; + + //@cond + // LazyKVList: supporting for split-ordered list + // key accessor (opt::none = internal key type is equal to user key type) + typedef opt::none key_accessor; + + // for internal use only!!! + typedef opt::none boundary_node_type; + + //@endcond + }; + + /// Metafunction converting option list to traits for LazyList + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + + See \ref LazyList, \ref type_traits, \ref cds::opt::make_options. + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS12 >::type + ,CDS_OPTIONS12 + >::type type; +#endif + }; + + + } // namespace lazy_list + + // Forward declarations + template + class LazyList; + + template + class LazyKVList; + + // Tag for selecting lazy list implementation + /** + This struct is empty and it is used only as a tag for selecting LazyList + as ordered list implementation in declaration of some classes. + + See split_list::type_traits::ordered_list as an example. 
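+        For example, a split-list map may select the lazy list as its bucket implementation as
+        follows (a sketch; the hash functor and other required split-list options are omitted):
+        \code
+        struct my_split_traits: public cds::container::split_list::type_traits
+        {
+            // use LazyList as the ordered-list (bucket) implementation
+            typedef cds::container::lazy_list_tag ordered_list;
+            // ... hash functor and other split-list options go here
+        };
+        typedef cds::container::SplitListMap< cds::gc::HP, int, int, my_split_traits > map_type;
+        \endcode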
+ */ + struct lazy_list_tag + {}; + + +}} // namespace cds::container + + +#endif // #ifndef __CDS_CONTAINER_LAZY_LIST_BASE_H diff --git a/cds/container/lazy_list_hp.h b/cds/container/lazy_list_hp.h new file mode 100644 index 00000000..66816a72 --- /dev/null +++ b/cds/container/lazy_list_hp.h @@ -0,0 +1,11 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_LAZY_LIST_HP_H +#define __CDS_CONTAINER_LAZY_LIST_HP_H + +#include +#include +#include +#include + +#endif // #ifndef __CDS_CONTAINER_LAZY_LIST_HP_H diff --git a/cds/container/lazy_list_hrc.h b/cds/container/lazy_list_hrc.h new file mode 100644 index 00000000..d63bbd15 --- /dev/null +++ b/cds/container/lazy_list_hrc.h @@ -0,0 +1,11 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_LAZY_LIST_HRC_H +#define __CDS_CONTAINER_LAZY_LIST_HRC_H + +#include +#include +#include +#include + +#endif // #ifndef __CDS_CONTAINER_LAZY_LIST_HRC_H diff --git a/cds/container/lazy_list_impl.h b/cds/container/lazy_list_impl.h new file mode 100644 index 00000000..62793198 --- /dev/null +++ b/cds/container/lazy_list_impl.h @@ -0,0 +1,963 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_LAZY_LIST_IMPL_H +#define __CDS_CONTAINER_LAZY_LIST_IMPL_H + +#include +#include + +namespace cds { namespace container { + + /// Lazy ordered list + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_LazyList_gc + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Source: + - [2005] Steve Heller, Maurice Herlihy, Victor Luchangco, Mark Moir, William N. Scherer III, and Nir Shavit + "A Lazy Concurrent List-Based Set Algorithm" + + The lazy list is based on an optimistic locking scheme for inserts and removes, + eliminating the need to use the equivalent of an atomically markable + reference. It also has a novel wait-free membership \p find operation + that does not need to perform cleanup operations and is more efficient. + + It is non-intrusive version of cds::intrusive::LazyList class + + Template arguments: + - \p GC - garbage collector used + - \p T - type stored in the list. The type must be default- and copy-constructible. + - \p Traits - type traits, default is lazy_list::type_traits + + Unlike standard container, this implementation does not divide type \p T into key and value part and + may be used as main building block for hash set algorithms. + + The key is a function (or a part) of type \p T, and this function is specified by Traits::compare functor + or Traits::less predicate + + LazyKVList is a key-value version of lazy non-intrusive list that is closer to the C++ std library approach. + + It is possible to declare option-based list with cds::container::lazy_list::make_traits metafunction istead of \p Traits template + argument. 
For example, the following traits-based declaration of gc::HP lazy list + \code + #include + // Declare comparator for the item + struct my_compare { + int operator ()( int i1, int i2 ) + { + return i1 - i2; + } + }; + + // Declare type_traits + struct my_traits: public cds::container::lazy_list::type_traits + { + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::container::LazyList< cds::gc::HP, int, my_traits > traits_based_list; + \endcode + + is equivalent for the following option-based list + \code + #include + + // my_compare is the same + + // Declare option-based list + typedef cds::container::LazyList< cds::gc::HP, int, + typename cds::container::lazy_list::make_traits< + cds::container::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + Template argument list \p Options of cds::container::lazy_list::make_traits metafunction are: + - opt::lock_type - lock type for per-node locking. Default is cds::lock::Spin. Note that each node + of the list has member of type \p lock_type, therefore, heavy-weighted locking primitive is not + acceptable as candidate for \p lock_type. + - opt::compare - key compare functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key compare. Default is \p std::less. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::allocator - the allocator used for creating and freeing list's item. Default is \ref CDS_DEFAULT_ALLOCATOR macro. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + + \par Usage + There are different specializations of this template for each garbage collecting schema used. + You should include appropriate .h-file depending on GC you are using: + - for gc::HP: \code #include \endcode + - for gc::PTB: \code #include \endcode + - for gc::HRC: \code #include \endcode + - for \ref cds_urcu_desc "RCU": \code #include \endcode + - for gc::nogc: \code #include \endcode + */ + template < + typename GC, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = lazy_list::type_traits +#else + typename Traits +#endif + > + class LazyList: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::LazyList< GC, T, Traits > +#else + protected details::make_lazy_list< GC, T, Traits >::type +#endif + { + //@cond + typedef details::make_lazy_list< GC, T, Traits > options; + typedef typename options::type base_class; + //@endcond + + public: + typedef T value_type ; ///< Type of value stored in the list + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename options::key_comparator key_comparator ; ///< key comparison functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename options::cxx_allocator cxx_allocator; + typedef typename options::node_deallocator node_deallocator; + typedef typename options::type_traits::compare intrusive_key_comparator; + + typedef typename base_class::node_type head_type; +# ifndef CDS_CXX11_LAMBDA_SUPPORT + typedef typename base_class::empty_erase_functor empty_erase_functor; +# endif + //@endcond + + public: + /// Guarded pointer + typedef cds::gc::guarded_ptr< gc, node_type, value_type, details::guarded_ptr_cast_set > guarded_ptr; + + private: + //@cond + static value_type& node_to_value( node_type& n ) + { + return n.m_Value; + } + static value_type const& node_to_value( node_type const& n ) + { + return n.m_Value; + } + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + template + struct insert_functor + { + Func m_func; + + insert_functor ( Func f ) + : m_func(f) + {} + + void operator()( node_type& node ) + { + cds::unref(m_func)( node_to_value(node) ); + } + }; + + template + struct ensure_functor + { + Func m_func; + const Q& m_arg; + + ensure_functor( const Q& arg, Func f ) + : m_func(f) + , m_arg( arg ) + {} + + void operator ()( bool bNew, node_type& node, node_type& ) + { + cds::unref(m_func)( bNew, node_to_value(node), m_arg ); + } + }; + + template + struct find_functor + { + Func m_func; + + find_functor( Func f ) + : m_func(f) + {} + + template + void operator ()( node_type& node, Q& val ) + { + cds::unref(m_func)( node_to_value(node), val ); + } + }; + + template + struct erase_functor + { + Func m_func; + + erase_functor( Func f ) + : m_func(f) + {} + + void operator()( node_type const& node ) + { + cds::unref(m_func)( node_to_value(node) ); + } + }; +# endif // ifndef CDS_CXX11_LAMBDA_SUPPORT + //@endcond + + protected: + //@cond + template + static node_type * alloc_node( Q const& v ) + { + return cxx_allocator().New( v ); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward(args)... ); + } +# endif + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + head_type& head() + { + return *base_class::head(); + } + + head_type const& head() const + { + return *base_class::head(); + } + + head_type& tail() + { + return *base_class::tail(); + } + + head_type const& tail() const + { + return *base_class::tail(); + } + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( const_cast( &pNode )) + {} + + iterator_type( head_type const * pNode ) + : iterator_base( const_cast( pNode )) + {} + + friend class LazyList; + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + {} + + iterator_type( const iterator_type& src ) + : iterator_base( src ) + {} + + value_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? 
&(p->m_Value) : null_ptr(); + } + + value_ref operator *() const + { + return (iterator_base::operator *()).m_Value; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + /// Forward iterator + /** + The forward iterator for lazy list has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. + For some GC (gc::HP, gc::HRC), a guard is limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if a limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary since it contains GC's guard that is thread-private GC data. + - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the list. + + Therefore, the use of iterators in concurrent environment is not good idea. Use the iterator on the concurrent container + for debug purpose only. + */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + iterator it( head() ); + ++it ; // skip dummy head node + return it; + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( tail() ); + } + + /// Returns a forward const iterator addressing the first element in a list + //@{ + const_iterator begin() const + { + const_iterator it( head() ); + ++it ; // skip dummy head node + return it; + } + const_iterator cbegin() + { + const_iterator it( head() ); + ++it ; // skip dummy head node + return it; + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a list + //@{ + const_iterator end() const + { + return const_iterator( tail() ); + } + const_iterator cend() + { + return const_iterator( tail() ); + } + //@} + + public: + /// Default constructor + /** + Initializes empty list + */ + LazyList() + {} + + /// List desctructor + /** + Clears the list + */ + ~LazyList() + { + clear(); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the list. + + The type \p Q should contain as minimum the complete key of the node. + The object of \ref value_type should be constructible from \p val of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + Returns \p true if inserting successful, \p false otherwise. 
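+
+            A minimal usage sketch (assuming \p my_traits is defined as in the class-level example):
+            \code
+            typedef cds::container::LazyList< cds::gc::HP, int, my_traits > int_list;
+            int_list theList;
+
+            assert( theList.insert( 10 ));      // 10 was not in the list - inserted
+            assert( !theList.insert( 10 ));     // duplicate key - insertion fails
+            \endcode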
+ */ + template + bool insert( Q const& val ) + { + return insert_at( head(), val ); + } + + /// Inserts new node + /** + This function inserts new node with default-constructed value and then it calls + \p func functor with signature + \code void func( value_type& itemValue ) ;\endcode + + The argument \p itemValue of user-defined functor \p func is the reference + to the list's item inserted. User-defined functor \p func should guarantee that during changing + item's value no any other changes could be made on this list's item by concurrent threads. + The user-defined functor can be passed by reference using boost::ref + and it is called only if the inserting is success. + + The type \p Q should contain the complete key of the node. + The object of \ref value_type should be constructible from \p key of type \p Q. + + The function allows to split creating of new item into two part: + - create item from \p key with initializing key-fields only; + - insert new item into the list; + - if inserting is successful, initialize non-key fields of item by calling \p f functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + */ + template + bool insert( Q const& key, Func func ) + { + return insert_at( head(), key, func ); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + return emplace_at( head(), std::forward(args)... ); + } +# endif + + /// Ensures that the \p key exists in the list + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p key not found in the list, then the new item created from \p key + is inserted into the list. Otherwise, the functor \p func is called with the item found. + The functor \p Func should be a function with signature: + \code + void func( bool bNew, value_type& item, const Q& val ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p key passed into the \p ensure function + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair ensure( Q const& key, Func f ) + { + return ensure_at( head(), key, f ); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_LazyList_hp_erase_val + Since the key of LazyList's item type \p T is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. 
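+
+            For example (a hedged sketch; \p foo, \p foo_compare and \p foo_list are illustrative names),
+            the search key type \p Q may differ from \p value_type as long as the comparator can compare both:
+            \code
+            struct foo {
+                int         nKey;   // key field
+                std::string sData;
+            };
+            struct foo_compare {
+                int operator()( foo const& lhs, foo const& rhs ) const { return lhs.nKey - rhs.nKey; }
+                int operator()( foo const& lhs, int rhs ) const        { return lhs.nKey - rhs; }
+                int operator()( int lhs, foo const& rhs ) const        { return lhs - rhs.nKey; }
+            };
+
+            // foo_list is a LazyList< cds::gc::HP, foo, ... > built with foo_compare
+            foo_list theList;
+            theList.erase( 5 );     // here Q = int while value_type = foo
+            \endcode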
+ + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_at( head(), key, intrusive_key_comparator(), [](value_type const&){} ); +# else + return erase_at( head(), key, intrusive_key_comparator(), empty_erase_functor() ); +# endif + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyList_hp_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& key, Less pred ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_at( head(), key, typename options::template less_wrapper::type(), [](value_type const&){} ); +# else + return erase_at( head(), key, typename options::template less_wrapper::type(), empty_erase_functor() ); +# endif + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_LazyList_hp_erase_func + The function searches an item with key \p key, calls \p f functor with item found + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(const value_type& val) { ... } + }; + \endcode + The functor may be passed by reference with boost:ref + + Since the key of LazyList's item type \p T is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + + See also: \ref erase + */ + template + bool erase( Q const& key, Func f ) + { + return erase_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyList_hp_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + return erase_at( head(), key, typename options::template less_wrapper::type(), f ); + } + + /// Extracts the item from the list with specified \p key + /** \anchor cds_nonintrusive_LazyList_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the list, and returns it in \p dest parameter. + If the item with key equal to \p key is not found the function returns \p false. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::LazyList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp; + theList.extract( gp, 5 ); + // Deal with gp + // ... 
+ + // Destructor of gp releases internal HP guard and frees the item + } + \endcode + */ + template + bool extract( guarded_ptr& dest, Q const& key ) + { + return extract_at( head(), dest.guard(), key, intrusive_key_comparator() ); + } + + /// Extracts the item from the list with comparing functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_LazyList_hp_extract "extract(guarded_ptr&, Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool extract_with( guarded_ptr& dest, Q const& key, Less pred ) + { + return extract_at( head(), dest.guard(), key, typename options::template less_wrapper::type() ); + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_LazyList_hp_find_val + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise + */ + template + bool find( Q const& key ) + { + return find_at( head(), key, intrusive_key_comparator() ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyList_hp_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& key, Less pred ) + { + return find_at( head(), key, typename options::template less_wrapper::type() ); + } + + /// Finds the key \p val and performs an action with it + /** \anchor cds_nonintrusive_LazyList_hp_find_func + The function searches an item with key equal to \p val and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the function is only guarantee + that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return find_at( head(), val, intrusive_key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyList_hp_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. 
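+
+            A minimal sketch of a suitable predicate and functor (\p foo, \p foo_less and \p theList
+            are illustrative names):
+            \code
+            struct foo_less {
+                bool operator()( foo const& lhs, foo const& rhs ) const { return lhs.nKey < rhs.nKey; }
+                bool operator()( foo const& lhs, int rhs ) const        { return lhs.nKey < rhs; }
+                bool operator()( int lhs, foo const& rhs ) const        { return lhs < rhs.nKey; }
+            };
+            struct foo_functor {
+                void operator()( foo& item, int& key ) { /* deal with the item found */ }
+            };
+
+            int nKey = 5;
+            theList.find_with( nKey, foo_less(), foo_functor() );
+            \endcode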
+ */ + template + bool find_with( Q& val, Less pred, Func f ) + { + return find_at( head(), val, typename options::template less_wrapper::type(), f ); + } + + /// Finds the key \p val and performs an action with it + /** \anchor cds_nonintrusive_LazyList_hp_find_cfunc + The function searches an item with key equal to \p val and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return find_at( head(), val, intrusive_key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyList_hp_find_cfunc "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& val, Less pred, Func f ) + { + return find_at( head(), val, typename options::template less_wrapper::type(), f ); + } + + /// Finds the key \p val and return the item found + /** \anchor cds_nonintrusive_LazyList_hp_get + The function searches the item with key equal to \p val + and assigns the item found to guarded pointer \p ptr. + The function returns \p true if \p val is found, and \p false otherwise. + If \p val is not found the \p ptr parameter is not changed. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::LazyList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp; + if ( theList.get( gp, 5 )) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard and frees the item + } + \endcode + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool get( guarded_ptr& ptr, Q const& val ) + { + return get_at( head(), ptr.guard(), val, intrusive_key_comparator() ); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_nonintrusive_LazyList_hp_get "get( guarded_ptr& ptr, Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool get_with( guarded_ptr& ptr, Q const& val, Less pred ) + { + return get_at( head(), ptr.guard(), val, typename options::template less_wrapper::type() ); + } + + /// Checks if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. 
+ + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Clears the list + /** + Post-condition: the list is empty + */ + void clear() + { + base_class::clear(); + } + + protected: + //@cond + bool insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != null_ptr() ); + scoped_node_ptr p( pNode ); + + if ( base_class::insert_at( &refHead, *pNode )) { + p.release(); + return true; + } + + return false; + } + + template + bool insert_at( head_type& refHead, const Q& val ) + { + return insert_node_at( refHead, alloc_node( val )); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace_at( head_type& refHead, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(args)... )); + } +# endif + + template + bool insert_at( head_type& refHead, const Q& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT +# ifdef CDS_BUG_STATIC_MEMBER_IN_LAMBDA + // GCC 4.5-4.7: node_to_value is unaccessible from lambda, + // like as MichaelList::node_to_value that requires to capture *this* despite on node_to_value is static function + value_type& (* n2v)( node_type& ) = node_to_value; + if ( base_class::insert_at( &refHead, *pNode, [&f,n2v](node_type& node){ cds::unref(f)( n2v(node) ); } )) +# else + if ( base_class::insert_at( &refHead, *pNode, [&f](node_type& node){ cds::unref(f)( node_to_value(node) ); } )) +# endif +# else + insert_functor wrapper( f ); + if ( base_class::insert_at( &refHead, *pNode, cds::ref(wrapper) )) +# endif + { + pNode.release(); + return true; + } + return false; + } + + template + bool erase_at( head_type& refHead, const Q& key, Compare cmp, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT +# ifdef CDS_BUG_STATIC_MEMBER_IN_LAMBDA + // GCC 4.5-4.7: node_to_value is unaccessible from lambda, + // like as MichaelList::node_to_value that requires to capture *this* despite on node_to_value is static function + value_type const& (* n2v)( node_type const& ) = node_to_value; + return base_class::erase_at( &refHead, key, cmp, [&f,n2v](node_type const& node){ cds::unref(f)( n2v(node) ); } ); +# else + return base_class::erase_at( &refHead, key, cmp, [&f](node_type const& node){ cds::unref(f)( node_to_value(node) ); } ); +# endif +# else + erase_functor wrapper( f ); + return base_class::erase_at( &refHead, key, cmp, cds::ref(wrapper) ); +# endif + } + + template + bool extract_at( head_type& refHead, typename gc::Guard& dest, Q const& key, Compare cmp ) + { + return base_class::extract_at( &refHead, dest, key, cmp ); + } + + template + std::pair ensure_at( head_type& refHead, const Q& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT +# ifdef CDS_BUG_STATIC_MEMBER_IN_LAMBDA + // GCC 4.5-4.7: node_to_value is unaccessible from lambda, + // like as MichaelList::node_to_value that requires to capture *this* despite on node_to_value is static function + value_type& (* n2v)( node_type& ) = node_to_value; + std::pair ret = base_class::ensure_at( &refHead, *pNode, + [&f, &key, n2v](bool bNew, node_type& node, node_type&){cds::unref(f)( bNew, n2v(node), key ); }); +# else + std::pair ret = base_class::ensure_at( &refHead, *pNode, + [&f, &key](bool bNew, node_type& node, node_type&){cds::unref(f)( bNew, node_to_value(node), key ); }); +# endif +# else + ensure_functor wrapper( key, f ); 
+ std::pair ret = base_class::ensure_at( &refHead, *pNode, cds::ref(wrapper)); +# endif + if ( ret.first && ret.second ) + pNode.release(); + + return ret; + } + + template + bool find_at( head_type& refHead, Q const& key, Compare cmp ) + { + return base_class::find_at( &refHead, key, cmp ); + } + + template + bool find_at( head_type& refHead, Q& val, Compare cmp, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT +# ifdef CDS_BUG_STATIC_MEMBER_IN_LAMBDA + // GCC 4.5-4.7: node_to_value is unaccessible from lambda, + // like as MichaelList::node_to_value that requires to capture *this* despite on node_to_value is static function + value_type& (* n2v)( node_type& ) = node_to_value; + return base_class::find_at( &refHead, val, cmp, [&f,n2v](node_type& node, Q& val){ cds::unref(f)( n2v(node), val ); }); +# else + return base_class::find_at( &refHead, val, cmp, [&f](node_type& node, Q& val){ cds::unref(f)( node_to_value(node), val ); }); +# endif +# else + find_functor wrapper( f ); + return base_class::find_at( &refHead, val, cmp, cds::ref(wrapper) ); +# endif + } + + template + bool get_at( head_type& refHead, typename gc::Guard& guard, Q const& key, Compare cmp ) + { + return base_class::get_at( &refHead, guard, key, cmp ); + } + + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_LAZY_LIST_IMPL_H diff --git a/cds/container/lazy_list_nogc.h b/cds/container/lazy_list_nogc.h new file mode 100644 index 00000000..b10f44c7 --- /dev/null +++ b/cds/container/lazy_list_nogc.h @@ -0,0 +1,467 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_LAZY_LIST_NOGC_H +#define __CDS_CONTAINER_LAZY_LIST_NOGC_H + +#include +#include +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + + template + struct make_lazy_list_nogc: public make_lazy_list + { + typedef make_lazy_list base_maker; + typedef typename base_maker::node_type node_type; + + struct type_traits: public base_maker::type_traits + { + typedef typename base_maker::node_deallocator disposer; + }; + + typedef intrusive::LazyList type; + }; + + } // namespace details + //@endcond + + /// Lazy ordered single-linked list (template specialization for gc::nogc) + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_LazyList_nogc + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of list item. + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + See \ref cds_nonintrusive_LazyList_gc "LazyList" for description of template parameters. + + The interface of the specialization is a little different. 
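+
+        For example, \p insert returns an iterator instead of \p bool and there is no \p erase
+        (a minimal sketch, assuming \p my_traits is defined as for the gc::HP list):
+        \code
+        typedef cds::container::LazyList< cds::gc::nogc, int, my_traits > nogc_list;
+        nogc_list theList;
+
+        nogc_list::iterator it = theList.insert( 10 );
+        if ( it != theList.end() ) {
+            // the item has been inserted; it stays in the list until clear() or destruction
+        }
+        \endcode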
+ */ + template + class LazyList: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::LazyList< gc::nogc, T, Traits > +#else + protected details::make_lazy_list_nogc< T, Traits >::type +#endif + { + //@cond + typedef details::make_lazy_list_nogc< T, Traits > options; + typedef typename options::type base_class; + //@endcond + + public: + typedef T value_type ; ///< Type of value stored in the list + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename options::key_comparator key_comparator ; ///< key comparison functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename options::cxx_allocator cxx_allocator; + typedef typename options::node_deallocator node_deallocator; + typedef typename options::type_traits::compare intrusive_key_comparator; + + typedef typename base_class::node_type head_type; + //@endcond + + protected: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct ensure_functor + { + node_type * m_pItemFound; + + ensure_functor() + : m_pItemFound( null_ptr() ) + {} + + void operator ()(bool, node_type& item, node_type& ) + { + m_pItemFound = &item; + } + }; +# endif + //@endcond + + protected: + //@cond + static node_type * alloc_node() + { + return cxx_allocator().New(); + } + + static node_type * alloc_node( value_type const& v ) + { + return cxx_allocator().New( v ); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward(args)... ); + } +# endif + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + head_type& head() + { + return base_class::m_Head; + } + + head_type const& head() const + { + return base_class::m_Head; + } + + head_type& tail() + { + return base_class::m_Tail; + } + + head_type const& tail() const + { + return base_class::m_Tail; + } + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( const_cast(&pNode) ) + {} + + explicit iterator_type( const iterator_base& it ) + : iterator_base( it ) + {} + + friend class LazyList; + + protected: + explicit iterator_type( node_type& pNode ) + : iterator_base( &pNode ) + {} + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + {} + + iterator_type( const iterator_type& src ) + : iterator_base( src ) + {} + + value_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? 
&(p->m_Value) : null_ptr(); + } + + value_ref operator *() const + { + return (iterator_base::operator *()).m_Value; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + /// Post-increment + iterator_type operator ++(int) + { + return iterator_base::operator ++(0); + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + iterator it( head() ); + ++it ; // skip dummy head node + return it; + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( tail()); + } + + /// Returns a forward const iterator addressing the first element in a list + //@{ + const_iterator begin() const + { + const_iterator it( head() ); + ++it ; // skip dummy head node + return it; + } + const_iterator cbegin() + { + const_iterator it( head() ); + ++it ; // skip dummy head node + return it; + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a list + //@{ + const_iterator end() const + { + return const_iterator( tail()); + } + const_iterator cend() + { + return const_iterator( tail()); + } + //@} + + protected: + //@cond + iterator node_to_iterator( node_type * pNode ) + { + if ( pNode ) + return iterator( *pNode ); + return end(); + } + //@endcond + + public: + /// Default constructor + /** + Initialize empty list + */ + LazyList() + {} + + /// List desctructor + /** + Clears the list + */ + ~LazyList() + { + clear(); + } + + /// Inserts new node + /** + The function inserts \p val in the list if the list does not contain + an item with key equal to \p val. + + Return an iterator pointing to inserted item if success \ref end() otherwise + */ + template + iterator insert( Q const& val ) + { + return node_to_iterator( insert_at( head(), val ) ); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Return an iterator pointing to inserted item if success \ref end() otherwise + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + iterator emplace( Args&&... args ) + { + return node_to_iterator( emplace_at( head(), std::forward(args)... )); + } +# endif + + /// Ensures that the item \p val exists in the list + /** + The operation inserts new item if the key \p val is not found in the list. + Otherwise, the function returns an iterator that points to item found. 
+ + Returns std::pair where \p first is an iterator pointing to + item found or inserted, \p second is true if new item has been added or \p false if the item + already is in the list. + */ + template + std::pair ensure( Q const& val ) + { + std::pair< node_type *, bool > ret = ensure_at( head(), val ); + return std::make_pair( node_to_iterator( ret.first ), ret.second ); + } + + /// Find the key \p val + /** \anchor cds_nonintrusive_LazyList_nogc_find + The function searches the item with key equal to \p val + and returns an iterator pointed to item found if the key is found, + and \ref end() otherwise + */ + template + iterator find( Q const& key ) + { + return node_to_iterator( find_at( head(), key, intrusive_key_comparator() )); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyList_nogc_find "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + iterator find_with( Q const& key, Less pred ) + { + return node_to_iterator( find_at( head(), key, typename options::template less_wrapper::type() )); + } + + /// Check if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Clears the list + /** + Post-condition: the list is empty + */ + void clear() + { + base_class::clear(); + } + + protected: + //@cond + node_type * insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != null_ptr() ); + scoped_node_ptr p( pNode ); + if ( base_class::insert_at( &refHead, *p )) + return p.release(); + + return null_ptr(); + } + + template + node_type * insert_at( head_type& refHead, Q const& val ) + { + return insert_node_at( refHead, alloc_node( val )); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + node_type * emplace_at( head_type& refHead, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(args)... 
)); + } +# endif + template + std::pair< node_type *, bool > ensure_at( head_type& refHead, Q const& val ) + { + scoped_node_ptr pNode( alloc_node( val )); + node_type * pItemFound = null_ptr(); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair ret = base_class::ensure_at( &refHead, *pNode, + [&pItemFound](bool, node_type& item, node_type&){ pItemFound = &item; }); +# else + ensure_functor func; + std::pair ret = base_class::ensure_at( &refHead, *pNode, boost::ref(func) ); + pItemFound = func.m_pItemFound; +# endif + assert( pItemFound != null_ptr() ); + + if ( ret.first && ret.second ) + pNode.release(); + + return std::make_pair( pItemFound, ret.second ); + } + + template + node_type * find_at( head_type& refHead, Q const& key, Compare cmp ) + { + return base_class::find_at( &refHead, key, cmp ); + } + + //@endcond + }; +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_LAZY_LIST_NOGC_H diff --git a/cds/container/lazy_list_ptb.h b/cds/container/lazy_list_ptb.h new file mode 100644 index 00000000..4fed1142 --- /dev/null +++ b/cds/container/lazy_list_ptb.h @@ -0,0 +1,11 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_LAZY_LIST_PTB_H +#define __CDS_CONTAINER_LAZY_LIST_PTB_H + +#include +#include +#include +#include + +#endif // #ifndef __CDS_CONTAINER_LAZY_LIST_PTB_H diff --git a/cds/container/lazy_list_rcu.h b/cds/container/lazy_list_rcu.h new file mode 100644 index 00000000..4332a8f1 --- /dev/null +++ b/cds/container/lazy_list_rcu.h @@ -0,0 +1,997 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_LAZY_LIST_RCU_H +#define __CDS_CONTAINER_LAZY_LIST_RCU_H + +#include +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Lazy ordered list (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_LazyList_rcu + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Source: + - [2005] Steve Heller, Maurice Herlihy, Victor Luchangco, Mark Moir, William N. Scherer III, and Nir Shavit + "A Lazy Concurrent List-Based Set Algorithm" + + The lazy list is based on an optimistic locking scheme for inserts and removes, + eliminating the need to use the equivalent of an atomically markable + reference. It also has a novel wait-free membership \p find operation + that does not need to perform cleanup operations and is more efficient. + + It is non-intrusive version of cds::intrusive::LazyList class + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p T - type stored in the list. The type must be default- and copy-constructible. + - \p Traits - type traits, default is lazy_list::type_traits + + The implementation does not divide type \p T into key and value part and + may be used as main building block for hash set containers. + The key is a function (or a part) of type \p T, and this function is specified by Traits::compare functor + or Traits::less predicate + + \ref cds_nonintrusive_LazyKVList_rcu "LazyKVList" is a key-value version + of lazy non-intrusive list that is closer to the C++ std library approach. + + @note Before including you should include + appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for list + of existing RCU class and corresponding header files. + + It is possible to declare option-based list with cds::container::lazy_list::make_traits metafunction istead of \p Traits template + argument. 
For example, the following traits-based declaration of gc::HP lazy list + \code + #include + #include + // Declare comparator for the item + struct my_compare { + int operator ()( int i1, int i2 ) + { + return i1 - i2; + } + }; + + // Declare type_traits + struct my_traits: public cds::container::lazy_list::type_traits + { + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::container::LazyList< cds::urcu::gc< cds::urcu::general_instant<> >, int, my_traits > traits_based_list; + \endcode + + is equivalent for the following option-based list + \code + #include + #include + + // my_compare is the same + + // Declare option-based list + typedef cds::container::LazyList< cds::urcu::gc< cds::urcu::general_instant<> >, int, + typename cds::container::lazy_list::make_traits< + cds::container::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + Template argument list \p Options of cds::container::lazy_list::make_traits metafunction are: + - opt::lock_type - lock type for per-node locking. Default is cds::lock::Spin. Note that each node + of the list has member of type \p lock_type, therefore, heavy-weighted locking primitive is not + acceptable as candidate for \p lock_type. + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::allocator - the allocator used for creating and freeing list's item. Default is \ref CDS_DEFAULT_ALLOCATOR macro. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - opt::rcu_check_deadlock - a deadlock checking policy. Default is opt::v::rcu_throw_deadlock + */ + template < + typename RCU, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = lazy_list::type_traits +#else + typename Traits +#endif + > + class LazyList< cds::urcu::gc, T, Traits >: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::LazyList< cds::urcu::gc, T, Traits > +#else + protected details::make_lazy_list< cds::urcu::gc, T, Traits >::type +#endif + { + //@cond + typedef details::make_lazy_list< cds::urcu::gc, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef T value_type ; ///< Type of value stored in the list + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename maker::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator ; ///< key compare functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename base_class::rcu_check_deadlock rcu_check_deadlock ; ///< Deadlock checking policy + + typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock + static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions require external locking + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename maker::cxx_allocator cxx_allocator; + typedef typename maker::node_deallocator node_deallocator; + typedef typename maker::type_traits::compare intrusive_key_comparator; + + typedef typename base_class::node_type head_type; +# ifndef CDS_CXX11_LAMBDA_SUPPORT + typedef typename base_class::empty_erase_functor empty_erase_functor; +# endif + //@endcond + + public: + typedef cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::type_traits::disposer > exempt_ptr; ///< pointer to extracted node + + private: + //@cond + static value_type& node_to_value( node_type& n ) + { + return n.m_Value; + } + static value_type const& node_to_value( node_type const& n ) + { + return n.m_Value; + } + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + template + struct insert_functor + { + Func m_func; + + insert_functor ( Func f ) + : m_func(f) + {} + + void operator()( node_type& node ) + { + cds::unref(m_func)( node_to_value(node) ); + } + }; + + template + struct ensure_functor + { + Func m_func; + Q const& m_arg; + + ensure_functor( Q const& arg, Func f ) + : m_func(f) + , m_arg( arg ) + {} + + void operator ()( bool bNew, node_type& node, node_type& ) + { + cds::unref(m_func)( bNew, node_to_value(node), m_arg ); + } + }; + + template + struct find_functor + { + Func m_func; + + find_functor( Func f ) + : m_func(f) + {} + + template + void operator ()( node_type& node, Q& val ) + { + cds::unref(m_func)( node_to_value(node), val ); + } + }; + + struct empty_find_functor + { + template + void operator ()( node_type& node, Q& val ) const + {} + }; + + template + struct erase_functor + { + Func m_func; + + erase_functor( Func f ) + : m_func(f) + {} + + void operator()( node_type const& node ) + { + cds::unref(m_func)( node_to_value(node) ); + } + }; +# endif // ifndef CDS_CXX11_LAMBDA_SUPPORT + //@endcond + + protected: + //@cond + template + static node_type * alloc_node( Q const& v ) + { + return cxx_allocator().New( v ); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward(args)... 
); + } +# endif + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + head_type& head() + { + return base_class::m_Head; + } + + head_type& head() const + { + return const_cast( base_class::m_Head ); + } + + head_type& tail() + { + return base_class::m_Tail; + } + + head_type const& tail() const + { + return base_class::m_Tail; + } + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( const_cast( &pNode )) + {} + + iterator_type( head_type const * pNode ) + : iterator_base( const_cast( pNode )) + {} + + friend class LazyList; + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + {} + + iterator_type( iterator_type const& src ) + : iterator_base( src ) + {} + + value_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? &(p->m_Value) : null_ptr(); + } + + value_ref operator *() const + { + return (iterator_base::operator *()).m_Value; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + /// Forward iterator + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + iterator it( head() ); + ++it ; // skip dummy head node + return it; + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( tail() ); + } + + /// Returns a forward const iterator addressing the first element in a list + //@{ + const_iterator begin() const + { + const_iterator it( head() ); + ++it ; // skip dummy head node + return it; + } + const_iterator cbegin() + { + const_iterator it( head() ); + ++it ; // skip dummy head node + return it; + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a list + //@{ + const_iterator end() const + { + return const_iterator( tail() ); + } + const_iterator cend() + { + return const_iterator( tail() ); + } + //@} + + public: + /// Default constructor + /** + Initializes empty list + */ + LazyList() + {} + + /// List desctructor + /** + Clears the list + */ + ~LazyList() + { + clear(); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the list. + + The type \p Q should contain as minimum the complete key of the node. 
+ The object of \ref value_type should be constructible from \p val of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + The function makes RCU lock internally. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( Q const& val ) + { + return insert_at( head(), val ); + } + + /// Inserts new node + /** + This function inserts new node with default-constructed value and then it calls + \p func functor with signature + \code void func( value_type& itemValue ) ;\endcode + + The argument \p itemValue of user-defined functor \p func is the reference + to the list's item inserted. User-defined functor \p func should guarantee that during changing + item's value no any other changes could be made on this list's item by concurrent threads. + The user-defined functor can be passed by reference using boost::ref + and it is called only if the inserting is success. + + The type \p Q should contain the complete key of the node. + The object of \ref value_type should be constructible from \p key of type \p Q. + + The function allows to split creating of new item into two part: + - create item from \p key with initializing key-fields only; + - insert new item into the list; + - if inserting is successful, initialize non-key fields of item by calling \p f functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + The function makes RCU lock internally. + */ + template + bool insert( Q const& key, Func func ) + { + return insert_at( head(), key, func ); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + The function makes RCU lock internally. + + @note This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + return emplace_at( head(), std::forward(args)... ); + } +# endif + + /// Ensures that the \p key exists in the list + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p key not found in the list, then the new item created from \p key + is inserted into the list. Otherwise, the functor \p func is called with the item found. + The functor \p Func should be a function with signature: + \code + void func( bool bNew, value_type& item, Q const& val ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, Q const& val ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p key passed into the \p ensure function + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + The function applies RCU lock internally. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. 
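+
+            A minimal usage sketch (\p my_rcu_list and \p foo are illustrative names; the RCU lock
+            is applied internally, so the functor runs under that lock):
+            \code
+            struct update_functor {
+                void operator()( bool bNew, foo& item, int const& key )
+                {
+                    // initialize (bNew == true) or update (bNew == false) non-key fields of item
+                }
+            };
+
+            my_rcu_list theList;
+            std::pair<bool, bool> res = theList.ensure( 10, update_functor() );
+            // res.first  - true if the operation is successful
+            // res.second - true if a new item has been inserted, false if an existing item was updated
+            \endcode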
+ */ + template + std::pair ensure( Q const& key, Func f ) + { + return ensure_at( head(), key, f ); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_LazyList_rcu_erase + Since the key of LazyList's item type \p T is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_at( head(), key, intrusive_key_comparator(), [](value_type const&){} ); +# else + return erase_at( head(), key, intrusive_key_comparator(), empty_erase_functor() ); +# endif + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyList_rcu_erase "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& key, Less pred ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_at( head(), key, typename maker::template less_wrapper::type(), [](value_type const&){} ); +# else + return erase_at( head(), key, typename maker::template less_wrapper::type(), empty_erase_functor() ); +# endif + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_LazyList_rcu_erase_func + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type const& val) { ... } + }; + \endcode + The functor may be passed by reference with boost:ref + + Since the key of LazyList's item type \p T is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { + return erase_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyList_rcu_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + return erase_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + + /// Extracts an item from the list + /** + @anchor cds_nonintrusive_LazyList_rcu_extract + The function searches an item with key equal to \p key in the list, + unlinks it from the list, and returns pointer to an item found in \p dest argument. + If the item with the key equal to \p key is not found the function returns \p false. + + @note The function does NOT call RCU read-side lock or synchronization, + and does NOT dispose the item found. It just excludes the item from the list + and returns a pointer to item found. 
+ You should lock RCU before calling this function. + + \code + #include + #include + + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::container::LazyList< rcu, Foo > rcu_lazy_list; + + rcu_lazy_list theList; + // ... + + rcu_lazy_list::exempt_ptr p; + { + // first, we should lock RCU + rcu_lazy_list::rcu_lock sl; + + // Now, you can apply extract function + // Note that you must not delete the item found inside the RCU lock + if ( theList.extract( p, 10 )) { + // do something with p + ... + } + } + // Outside RCU lock section we may safely release extracted pointer. + // release() passes the pointer to RCU reclamation cycle. + p.release(); + \endcode + */ + template + bool extract( exempt_ptr& dest, Q const& key ) + { + dest = extract_at( head(), key, intrusive_key_comparator() ); + return !dest.empty(); + } + + /// Extracts an item from the list using \p pred predicate for searching + /** + This function is the analog for \ref cds_nonintrusive_LazyList_rcu_extract "extract(exempt_ptr&, Q const&)". + + The \p pred is a predicate used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as \ref key_comparator. + */ + template + bool extract_with( exempt_ptr& dest, Q const& key, Less pred ) + { + dest = extract_at( head(), key, typename maker::template less_wrapper::type() ); + return !dest.empty(); + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_LazyList_rcu_find_val + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function makes RCU lock internally. + */ + template + bool find( Q const& key ) const + { + return find_at( head(), key, intrusive_key_comparator() ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyList_rcu_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& key, Less pred ) const + { + return find_at( head(), key, typename maker::template less_wrapper::type() ); + } + + /// Finds the key \p val and performs an action with it + /** \anchor cds_nonintrusive_LazyList_rcu_find_func + The function searches an item with key equal to \p val and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the function is only guarantee + that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + The function makes RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. 
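+
+            A possible usage sketch (the \p Foo type and its \p nCounter field are assumptions
+            for illustration only):
+            \code
+            typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu;
+            typedef cds::container::LazyList< rcu, Foo > foo_list;
+
+            struct find_functor {
+                void operator()( Foo& item, int& key )
+                {
+                    item.nCounter += 1;   // update a non-key field of the item found
+                }
+            };
+
+            foo_list theList;
+            // ...
+            int key = 10;
+            bool bFound = theList.find( key, find_functor() );   // RCU lock is taken internally
+            \endcode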
+ */ + template + bool find( Q& val, Func f ) const + { + return find_at( head(), val, intrusive_key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyList_rcu_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q& val, Less pred, Func f ) const + { + return find_at( head(), val, typename maker::template less_wrapper::type(), f ); + } + + /// Finds the key \p val and performs an action with it + /** \anchor cds_nonintrusive_LazyList_rcu_find_cfunc + The function searches an item with key equal to \p val and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function makes RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) const + { + return find_at( head(), val, intrusive_key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyList_rcu_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& val, Less pred, Func f ) const + { + return find_at( head(), val, typename maker::template less_wrapper::type(), f ); + } + + /// Finds the key \p val and return the item found + /** \anchor cds_nonintrusive_LazyList_rcu_get + The function searches the item with key equal to \p val and returns the pointer to item found. + If \p val is not found it returns \p NULL. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::container::LazyList< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > ord_list; + ord_list theList; + // ... + { + // Lock RCU + ord_list::rcu_lock lock; + + foo * pVal = theList.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + // Unlock RCU by rcu_lock destructor + // pVal can be freed at any time after RCU has been unlocked + } + \endcode + */ + template + value_type * get( Q const& val ) const + { + return get_at( head(), val, intrusive_key_comparator()); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_nonintrusive_LazyList_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. 
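+
+            For example, a suitable \p Less predicate might look like the sketch below; it reuses
+            the \p ord_list and \p theList names from the \p get() example above, and the \p nKey
+            field of \p foo is an assumption for illustration:
+            \code
+            struct foo_less {
+                bool operator()( foo const& v, int key ) const { return v.nKey < key; }
+                bool operator()( int key, foo const& v ) const { return key < v.nKey; }
+            };
+
+            {
+                // As for get(), RCU should be locked before the call
+                ord_list::rcu_lock lock;
+
+                foo * pVal = theList.get_with( 5, foo_less() );
+                if ( pVal ) {
+                    // Deal with pVal while RCU is locked
+                }
+            }
+            \endcode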
+ */ + template + value_type * get_with( Q const& val, Less pred ) const + { + return get_at( head(), val, typename maker::template less_wrapper::type()); + } + + /// Checks if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Clears the list + /** + Post-condition: the list is empty + */ + void clear() + { + base_class::clear(); + } + + protected: + //@cond + bool insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != null_ptr() ); + scoped_node_ptr p( pNode ); + + if ( base_class::insert_at( &refHead, *pNode )) { + p.release(); + return true; + } + + return false; + } + + template + bool insert_at( head_type& refHead, Q const& val ) + { + return insert_node_at( refHead, alloc_node( val )); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace_at( head_type& refHead, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(args)... )); + } +# endif + + template + bool insert_at( head_type& refHead, Q const& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT +# ifdef CDS_BUG_STATIC_MEMBER_IN_LAMBDA + // GCC 4.5-4.7: node_to_value is unaccessible from lambda, + // like as MichaelList::node_to_value that requires to capture *this* despite on node_to_value is static function + value_type& (* n2v)( node_type& ) = node_to_value; + if ( base_class::insert_at( &refHead, *pNode, [&f,n2v](node_type& node){ cds::unref(f)( n2v(node) ); } )) +# else + if ( base_class::insert_at( &refHead, *pNode, [&f](node_type& node){ cds::unref(f)( node_to_value(node) ); } )) +# endif +# else + insert_functor wrapper( f ); + if ( base_class::insert_at( &refHead, *pNode, cds::ref(wrapper) )) +# endif + { + pNode.release(); + return true; + } + return false; + } + + template + bool erase_at( head_type& refHead, Q const& key, Compare cmp, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT +# ifdef CDS_BUG_STATIC_MEMBER_IN_LAMBDA + // GCC 4.5-4.7: node_to_value is unaccessible from lambda, + // like as MichaelList::node_to_value that requires to capture *this* despite on node_to_value is static function + value_type const& (* n2v)( node_type const& ) = node_to_value; + return base_class::erase_at( &refHead, key, cmp, [&f,n2v](node_type const& node){ cds::unref(f)( n2v(node) ); } ); +# else + return base_class::erase_at( &refHead, key, cmp, [&f](node_type const& node){ cds::unref(f)( node_to_value(node) ); } ); +# endif +# else + erase_functor wrapper( f ); + return base_class::erase_at( &refHead, key, cmp, cds::ref(wrapper) ); +# endif + } + + template + node_type * extract_at( head_type& refHead, Q const& key, Compare cmp ) + { + return base_class::extract_at( &refHead, key, cmp ); + } + + template + std::pair ensure_at( head_type& refHead, Q const& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT +# ifdef CDS_BUG_STATIC_MEMBER_IN_LAMBDA + // GCC 4.5-4.7: node_to_value is unaccessible from lambda, + // like as MichaelList::node_to_value that requires to capture *this* despite on node_to_value is static function + value_type& (* n2v)( node_type& ) = 
node_to_value; + std::pair ret = base_class::ensure_at( &refHead, *pNode, + [&f, &key, n2v](bool bNew, node_type& node, node_type&){cds::unref(f)( bNew, n2v(node), key ); }); +# else + std::pair ret = base_class::ensure_at( &refHead, *pNode, + [&f, &key](bool bNew, node_type& node, node_type&){cds::unref(f)( bNew, node_to_value(node), key ); }); +# endif +# else + ensure_functor wrapper( key, f ); + std::pair ret = base_class::ensure_at( &refHead, *pNode, cds::ref(wrapper)); +# endif + if ( ret.first && ret.second ) + pNode.release(); + + return ret; + } + + template + bool find_at( head_type& refHead, Q const& key, Compare cmp ) const + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_at( &refHead, key, cmp, [](node_type&, Q const &) {} ); +# else + return base_class::find_at( &refHead, key, cmp, empty_find_functor() ); +# endif + } + + template + bool find_at( head_type& refHead, Q& val, Compare cmp, Func f ) const + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT +# ifdef CDS_BUG_STATIC_MEMBER_IN_LAMBDA + // GCC 4.5-4.7: node_to_value is unaccessible from lambda, + // like as MichaelList::node_to_value that requires to capture *this* despite on node_to_value is static function + value_type& (* n2v)( node_type& ) = node_to_value; + return base_class::find_at( &refHead, val, cmp, [&f,n2v](node_type& node, Q& val){ cds::unref(f)( n2v(node), val ); }); +# else + return base_class::find_at( &refHead, val, cmp, [&f](node_type& node, Q& val){ cds::unref(f)( node_to_value(node), val ); }); +# endif +# else + find_functor wrapper( f ); + return base_class::find_at( &refHead, val, cmp, cds::ref(wrapper) ); +# endif + } + + template + value_type * get_at( head_type& refHead, Q const& val, Compare cmp ) const + { + node_type * pNode = base_class::get_at( &refHead, val, cmp ); + return pNode ? &pNode->m_Value : null_ptr(); + } + + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_LAZY_LIST_RCU_H diff --git a/cds/container/michael_deque.h b/cds/container/michael_deque.h new file mode 100644 index 00000000..1d80e502 --- /dev/null +++ b/cds/container/michael_deque.h @@ -0,0 +1,493 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_DEQUE_H +#define __CDS_CONTAINER_MICHAEL_DEQUE_H + +#include +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + template + struct make_michael_deque + { + typedef GC gc; + typedef T value_type; + + struct default_options + { + typedef cds::backoff::empty back_off; + typedef cds::atomicity::empty_item_counter item_counter; + typedef cds::intrusive::michael_deque::dummy_stat stat; + typedef cds::opt::v::relaxed_ordering memory_model; + enum { alignment = cds::opt::cache_line_alignment }; + typedef CDS_DEFAULT_ALLOCATOR allocator; + }; + + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< default_options, CDS_OPTIONS7 >::type + ,CDS_OPTIONS7 + >::type options; + + struct node_type : public cds::intrusive::michael_deque::node< gc > + { + value_type m_value; + node_type() + {} + node_type(const value_type& val) + : m_value( val ) + {} +# ifdef CDS_EMPLACE_SUPPORT + template + node_type( Args&&... args ) + : m_value( std::forward(args)...) 
+ {} +# endif + }; + + typedef typename options::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + + typedef cds::intrusive::MichaelDeque< gc, + node_type + ,cds::intrusive::opt::hook< + cds::intrusive::michael_deque::base_hook< cds::opt::gc > + > + ,cds::opt::back_off< typename options::back_off > + ,cds::intrusive::opt::disposer< node_deallocator > + ,cds::opt::item_counter< typename options::item_counter > + ,cds::opt::stat< typename options::stat > + ,cds::opt::alignment< options::alignment > + ,cds::opt::memory_model< typename options::memory_model > + > type; + }; + } + //@endcond + + /// Michael's deque + /** @ingroup cds_nonintrusive_deque + + Implementation of Michael's deque algorithm. + + \par Source: + [2003] Maged Michael "CAS-based Lock-free Algorithm for Shared Deque" + + Short description (from Michael's paper) + + The deque is represented as a doubly-linked list. Each node in the list contains two link pointers, + \p pRight and \p pLeft, and a data field. A shared variable, \p Anchor, holds the two anchor + pointers to the leftmost and rightmost nodes in the list, if any, and a three-value + status tag. Anchor must fit in a memory block that can be read and manipulated + using CAS or LL/SC, atomically. Initially both anchor pointers have null values + and the status tag holds the value stable, indicating an empty deque. + + The status tag serves to indicate if the deque is in an unstable state. When + a process finds the deque in an unstable state, it must first attempt to take it + to a stable state before attempting its own operation. + + The algorithm can use 64bit CAS. Instead of a pointer the node contains two + 31bit link indices + one bit for status tag; + this trick allows use 64bit CAS to manipulate \p Anchor. Internal mapper + (based on intrusive::MichaelHashSet intrusive container) + reflects link indices to item pointers. The maximum number of item in + the deque is limited by 2**31 - 1 that is practically unbounded. + + Template arguments: + - \p GC - garbage collector type: gc::HP, gc::PTB. Note that gc::HRC is NOT supported for this container. + - \p T is a type stored in the queue. It should be default-constructible, copy-constructible, assignable type. + - \p Options - options + + Permissible \p Options: + - opt::allocator - allocator (like \p std::allocator). Default is \ref CDS_DEFAULT_ALLOCATOR. + Used for item allocation. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that means no item counting feature + - opt::stat - the type to gather internal statistics. + Possible option value are: \ref intrusive::michael_deque::stat, \ref intrusive::michael_deque::dummy_stat, + user-provided class that supports intrusive::michael_deque::stat interface. + Default is \ref intrusive::michael_deque::dummy_stat. + - opt::alignment - the alignment for internal deque data. Default is opt::cache_line_alignment + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). 
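+
+        \par Example
+        A usage sketch; the option set shown is illustrative, any subset of the options above may be used:
+        \code
+        #include <cds/container/michael_deque.h>
+
+        // Michael's deque of int based on gc::HP with item counting enabled
+        typedef cds::container::MichaelDeque< cds::gc::HP, int,
+            cds::opt::item_counter< cds::atomicity::item_counter >
+        > int_deque;
+
+        int_deque theDeque;
+        theDeque.push_back( 10 );   // push to the right side
+        theDeque.push_front( 1 );   // push to the left side
+
+        int n;
+        if ( theDeque.pop_front( n )) {
+            // n == 1
+        }
+        \endcode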
+ */ + template + class MichaelDeque: +#ifdef CDS_DOXYGEN_INVOKED + intrusive::MichaelDeque< GC, intrusive::michael_deque::node< T >, Options... > +#else + details::make_michael_deque< GC, T, CDS_OPTIONS7 >::type +#endif + { + //@cond + typedef details::make_michael_deque< GC, T, CDS_OPTIONS7 > options; + typedef typename options::type base_class; + //@endcond + + public: + /// Rebind template arguments + template + struct rebind { + typedef MichaelDeque< GC2, T2, CDS_OTHER_OPTIONS7> other ; ///< Rebinding result + }; + + public: + typedef T value_type ; ///< Value type stored in the deque + + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename options::options::item_counter item_counter ; ///< Item counting policy used + typedef typename options::options::stat stat ; ///< Internal statistics policy used + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + + protected: + typedef typename options::node_type node_type ; ///< queue node type (derived from intrusive::single_link::node) + + //@cond + typedef typename options::cxx_allocator cxx_allocator; + typedef typename options::node_deallocator node_deallocator; // deallocate node + typedef typename base_class::node_traits node_traits; + //@endcond + + protected: + ///@cond + static node_type * alloc_node() + { + return cxx_allocator().New(); + } + static node_type * alloc_node( const value_type& val ) + { + return cxx_allocator().New( val ); + } +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward(args)... ); + } +# endif + static void free_node( node_type * p ) + { + node_deallocator()( p ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + bool push_node_back( node_type * pNode ) + { + assert( pNode != null_ptr()); + scoped_node_ptr p(pNode); + + if ( base_class::push_back( *pNode ) ) { + p.release(); + return true; + } + return false; + } + + bool push_node_front( node_type * pNode ) + { + assert( pNode != null_ptr()); + scoped_node_ptr p(pNode); + + if ( base_class::push_front( *pNode ) ) { + p.release(); + return true; + } + return false; + } + //@endcond + + public: + /// Default constructor + /** + Initializes the deque object that can contain up to 2**16 - 1 items + */ + MichaelDeque() + {} + + /// Constructor + /** + Initializes the deque object with estimated item count \p nMaxItemCount. + \p nLoadFactor is a parameter of internal memory mapper based on intrusive::MichaelHashSet; + see MichaelHashSet ctor for details + */ + MichaelDeque( unsigned int nMaxItemCount, unsigned int nLoadFactor = 4 ) + : base_class( nMaxItemCount, nLoadFactor ) + {} + + /// Destructor clears the deque + ~MichaelDeque() + {} + + public: + /// Push back (right) side + /** + Push new item \p val to right side of the deque. + */ + bool push_back( value_type const& val ) + { + return push_node_back( alloc_node( val )); + } + + /// Push back (right) side using copy functor + /** + \p Func is a functor called to copy value \p data of type \p Type + which may be differ from type \p T stored in the deque. 
+ The functor's interface is: + \code + struct myFunctor { + void operator()(T& dest, Type const& data) + { + // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + + Requirements The functor \p Func should not throw any exception. + */ + template + bool push_back( Type const& val, Func f ) + { + scoped_node_ptr p( alloc_node()); + unref(f)( p->m_value, val ); + if ( base_class::push_back( *p )) { + p.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Push back (right side) data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if the oprration successful, \p false otherwise. + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace_back( Args&&... args ) + { + return push_node_back( alloc_node( std::forward(args)... )); + } +# endif + + /// Push front (left) side + /** + Push new item \p val to left side of the deque. + */ + bool push_front( value_type const& val ) + { + return push_node_front( alloc_node( val )); + } + + /// Push front side using copy functor + /** + \p Func is a functor called to copy value \p data of type \p Type + which may be differ from type \p T stored in the deque. + The functor's interface is: + \code + struct myFunctor { + void operator()(T& dest, Type const& data) + { + // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + + Requirements The functor \p Func should not throw any exception. + */ + template + bool push_front( Type const& val, Func f ) + { + scoped_node_ptr p( alloc_node()); + unref(f)( p->m_value, val ); + if ( base_class::push_front( *p )) { + p.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Push front (left side) data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if the operation successful, \p false otherwise. + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace_front( Args&&... args ) + { + return push_node_front( alloc_node( std::forward(args)... )); + } +# endif + + /// Pops back side, no return value + /** + The function returns \p true if the deque has not been empty (in other words, an item has been popped), + otherwise the function returns \p false. + */ + bool pop_back() + { + return base_class::pop_back() != null_ptr(); + } + + /// Pops back side a value using copy functor + /** + \p Func is a functor called to copy value popped to \p dest of type \p Type + which may be differ from type \p T stored in the deque. + The functor's interface is: + \code + struct myFunctor { + void operator()(Type& dest, T const& data) + { + // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + + Requirements The functor \p Func should not throw any exception. 
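+
+            A usage sketch; popping an \p int deque into a \p long \p long destination, the types are
+            illustrative assumptions:
+            \code
+            typedef cds::container::MichaelDeque< cds::gc::HP, int > int_deque;
+
+            struct widen_copy {
+                void operator()( long long& dest, int const& data )
+                {
+                    dest = data;   // copy the popped value into a destination of a different type
+                }
+            };
+
+            int_deque theDeque;
+            // ...
+            long long val;
+            if ( theDeque.pop_back( val, widen_copy() )) {
+                // val contains the value popped from the right (back) side
+            }
+            \endcode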
+ */ + template + bool pop_back( Type& dest, Func f ) + { + typename base_class::pop_result res; + if ( base_class::do_pop_back( res )) { + unref(f)( dest, node_traits::to_value_ptr( res.pPopped )->m_value ); + base_class::dispose_result( res ); + return true; + } + return false; + } + + + /// Pops back side, store value popped into \p dest + /** + If deque is not empty, the function returns \p true, \p dest contains copy of + value popped. The assignment operator for type \ref value_type is invoked. + If deque is empty, the function returns \p false, \p dest is unchanged. + */ + bool pop_back( value_type& dest ) + { + typedef cds::details::trivial_assign functor; + return pop_back( dest, functor() ); + } + + /// Pops front side, no return value + /** + The function returns \p true if the deque has not been empty (in other words, an item has been popped), + otherwise the function returns \p false. + */ + bool pop_front() + { + return base_class::pop_front() != null_ptr(); + } + + /// Pops front side a value using copy functor + /** + \p Func is a functor called to copy value popped to \p dest of type \p Type + which may be differ from type \p T stored in the deque. + The functor's interface is: + \code + struct myFunctor { + void operator()(Type& dest, T const& data) + { + // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + + Requirements The functor \p Func should not throw any exception. + */ + template + bool pop_front( Type& dest, Func f ) + { + typename base_class::pop_result res; + if ( base_class::do_pop_front( res )) { + unref(f)( dest, node_traits::to_value_ptr( res.pPopped )->m_value ); + base_class::dispose_result( res ); + return true; + } + return false; + } + + + /// Pops front side, store value popped into \p dest + /** + If deque is not empty, the function returns \p true, \p dest contains copy of + value popped. The assignment operator for type \ref value_type is invoked. + If deque is empty, the function returns \p false, \p dest is unchanged. + */ + bool pop_front( value_type& dest ) + { + typedef cds::details::trivial_assign functor; + return pop_front( dest, functor() ); + } + + /// Returns deque's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact does not mean that the deque + is empty. To check deque emptyness use \ref empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Checks if the dequeue is empty + bool empty() const + { + return base_class::empty(); + } + + /// Clear the deque + /** + The function repeatedly calls \ref pop_back until it returns \p NULL. 
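+
+            For example (a sketch):
+            \code
+            typedef cds::container::MichaelDeque< cds::gc::HP, int > int_deque;
+            int_deque theDeque;
+            theDeque.push_back( 1 );
+            theDeque.push_back( 2 );
+            theDeque.clear();
+            assert( theDeque.empty());
+            \endcode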
+ */ + void clear() + { + return base_class::clear(); + } + + /// Returns reference to internal statistics + const stat& statistics() const + { + return base_class::statistics(); + } + }; + +}} // namespace cds::container + + +#endif // #ifndef __CDS_CONTAINER_MICHAEL_DEQUE_H diff --git a/cds/container/michael_kvlist_hp.h b/cds/container/michael_kvlist_hp.h new file mode 100644 index 00000000..4f315b77 --- /dev/null +++ b/cds/container/michael_kvlist_hp.h @@ -0,0 +1,11 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_KVLIST_HP_H +#define __CDS_CONTAINER_MICHAEL_KVLIST_HP_H + +#include +#include +#include +#include + +#endif // #ifndef __CDS_CONTAINER_MICHAEL_KVLIST_HP_H diff --git a/cds/container/michael_kvlist_hrc.h b/cds/container/michael_kvlist_hrc.h new file mode 100644 index 00000000..50143e54 --- /dev/null +++ b/cds/container/michael_kvlist_hrc.h @@ -0,0 +1,11 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_KVLIST_HRC_H +#define __CDS_CONTAINER_MICHAEL_KVLIST_HRC_H + +#include +#include +#include +#include + +#endif // #ifndef __CDS_CONTAINER_MICHAEL_KVLIST_HRC_H diff --git a/cds/container/michael_kvlist_impl.h b/cds/container/michael_kvlist_impl.h new file mode 100644 index 00000000..02cd37fc --- /dev/null +++ b/cds/container/michael_kvlist_impl.h @@ -0,0 +1,911 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_KVLIST_IMPL_H +#define __CDS_CONTAINER_MICHAEL_KVLIST_IMPL_H + +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Michael's ordered list (key-value pair) + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_MichaelKVList_gc + + This is key-value variation of non-intrusive MichaelList. + Like standard container, this implementation split a value stored into two part - + constant key and alterable value. + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Template arguments: + - \p GC - garbage collector used + - \p Key - key type of an item stored in the list. It should be copy-constructible + - \p Value - value type stored in a list + - \p Traits - type traits, default is michael_list::type_traits + + It is possible to declare option-based list with cds::container::michael_list::make_traits metafunction istead of \p Traits template + argument. For example, the following traits-based declaration of gc::HP Michael's list + \code + #include + // Declare comparator for the item + struct my_compare { + int operator ()( int i1, int i2 ) + { + return i1 - i2; + } + }; + + // Declare type_traits + struct my_traits: public cds::container::michael_list::type_traits + { + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::container::MichaelKVList< cds::gc::HP, int, int, my_traits > traits_based_list; + \endcode + + is equivalent for the following option-based list + \code + #include + + // my_compare is the same + + // Declare option-based list + typedef cds::container::MichaelKVList< cds::gc::HP, int, int, + typename cds::container::michael_list::make_traits< + cds::container::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + Template argument list \p Options of cds::container::michael_list::make_traits metafunction are: + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. 
+ - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::allocator - the allocator used for creating and freeing list's item. Default is \ref CDS_DEFAULT_ALLOCATOR macro. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + + \par Usage + There are different specializations of this template for each garbage collecting schema used. + You should include appropriate .h-file depending on GC you are using: + - for gc::HP: \code #include \endcode + - for gc::PTB: \code #include \endcode + - for gc::HRC: \code #include \endcode + - for \ref cds_urcu_desc "RCU": \code #include \endcode + - for gc::nogc: \code #include \endcode + */ + template < + typename GC, + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = michael_list::type_traits +#else + typename Traits +#endif + > + class MichaelKVList: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::MichaelList< GC, implementation_defined, Traits > +#else + protected details::make_michael_kvlist< GC, Key, Value, Traits >::type +#endif + { + //@cond + typedef details::make_michael_kvlist< GC, Key, Value, Traits > options; + typedef typename options::type base_class; + //@endcond + + public: +#ifdef CDS_DOXYGEN_INVOKED + typedef Key key_type ; ///< Key type + typedef Value mapped_type ; ///< Type of value stored in the list + typedef std::pair value_type ; ///< key/value pair stored in the list +#else + typedef typename options::key_type key_type; + typedef typename options::value_type mapped_type; + typedef typename options::pair_type value_type; +#endif + + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename options::key_comparator key_comparator ; ///< key comparison functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename options::cxx_allocator cxx_allocator; + typedef typename options::node_deallocator node_deallocator; + typedef typename options::type_traits::compare intrusive_key_comparator; + + typedef typename base_class::atomic_node_ptr head_type; + //@endcond + + public: + /// Guarded pointer + typedef cds::gc::guarded_ptr< gc, node_type, value_type, details::guarded_ptr_cast_map > guarded_ptr; + + private: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + template + class insert_functor: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + insert_functor ( Func f ) + : base_class( f ) + {} + + void operator()( node_type& node ) + { + base_class::get()( node.m_Data ); + } + }; + + template + class ensure_functor: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + ensure_functor( Func f ) + : base_class(f) + {} + + void operator ()( bool bNew, node_type& node, node_type& ) + { + base_class::get()( bNew, node.m_Data ); + } + }; + + template + class find_functor: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + find_functor( Func f ) + : base_class(f) + {} + + template + void operator ()( node_type& node, Q& ) + { + base_class::get()( node.m_Data ); + } + }; + + template + struct erase_functor + { + Func m_func; + + erase_functor( Func f ) + : m_func( f ) + {} + + void operator ()( node_type const & node ) + { + cds::unref(m_func)( const_cast(node.m_Data) ); + } + }; +# endif // ifndef CDS_CXX11_LAMBDA_SUPPORT + //@endcond + + protected: + //@cond + template + static node_type * alloc_node(const K& key) + { + return cxx_allocator().New( key ); + } + + template + static node_type * alloc_node( const K& key, const V& val ) + { + return cxx_allocator().New( key, val ); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node( K&& key, Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward(key), std::forward(args)...); + } +# endif + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + head_type& head() + { + return base_class::m_pHead; + } + + head_type const& head() const + { + return base_class::m_pHead; + } + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( pNode ) + {} + + friend class MichaelKVList; + + public: + typedef typename cds::details::make_const_type::reference value_ref; + typedef typename cds::details::make_const_type::pointer value_ptr; + + typedef typename cds::details::make_const_type::reference pair_ref; + typedef typename cds::details::make_const_type::pointer pair_ptr; + + iterator_type() + {} + + iterator_type( iterator_type const& src ) + : iterator_base( src ) + {} + + key_type const& key() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != null_ptr() ); + return p->m_Data.first; + } + + pair_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? 
&(p->m_Data) : null_ptr(); + } + + pair_ref operator *() const + { + typename iterator_base::value_ref p = iterator_base::operator *(); + return p.m_Data; + } + + value_ref val() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != null_ptr() ); + return p->m_Data.second; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + /// Forward iterator + /** + The forward iterator for Michael's list has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. + For some GC (gc::HP, gc::HRC), a guard is limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if a limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary since it contains GC's guard that is thread-private GC data. + - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the list. + + Therefore, the use of iterators in concurrent environment is not good idea. Use the iterator on the concurrent container + for debug purpose only. + + The iterator interface to access item data: + - operator -> - returns a pointer to \ref value_type for iterator + - operator * - returns a reference (a const reference for \p const_iterator) to \ref value_type for iterator + - const key_type& key() - returns a key reference for iterator + - mapped_type& val() - retuns a value reference for iterator (const reference for \p const_iterator) + + For both functions the iterator should not be equal to end() + */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( head() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to NULL. + + The returned value can be used only to control reaching the end of the list. 
+ For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator addressing the first element in a list + //@{ + const_iterator begin() const + { + return const_iterator( head() ); + } + const_iterator cbegin() + { + return const_iterator( head() ); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a list + //@{ + const_iterator end() const + { + return const_iterator(); + } + const_iterator cend() + { + return const_iterator(); + } + //@} + + public: + /// Default constructor + /** + Initializes empty list + */ + MichaelKVList() + {} + + /// List desctructor + /** + Clears the list + */ + ~MichaelKVList() + { + clear(); + } + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( const K& key ) + { + return insert_at( head(), key ); + } + + /// Inserts new node with a key and a value + /** + The function creates a node with \p key and value \p val, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( const K& key, const V& val ) + { + // We cannot use insert with functor here + // because we cannot lock inserted node for updating + // Therefore, we use separate function + return insert_at( head(), key, val ); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the list's item inserted. item.second is a reference to item's value that may be changed. + User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this list's item by concurrent threads. + The user-defined functor can be passed by reference using boost::ref + and it is called only if inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the list; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + */ + template + bool insert_key( const K& key, Func func ) + { + return insert_key_at( head(), key, func ); + } + + /// Ensures that the \p key exists in the list + /** + The operation performs inserting or changing data with lock-free manner. 
+ + If the \p key not found in the list, then the new item created from \p key + is inserted into the list (note that in this case the \ref key_type should be + copy-constructible from type \p K). + Otherwise, the functor \p func is called with item found. + The functor \p Func may be a function with signature: + \code + void func( bool bNew, value_type& item ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + + The functor may change any fields of the \p item.second that is \ref mapped_type; + however, \p func must guarantee that during changing no any other modifications + could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair ensure( const K& key, Func f ) + { + return ensure_at( head(), key, f ); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref mapped_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + @note This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( K&& key, Args&&... args ) + { + return emplace_at( head(), std::forward(key), std::forward(args)... ); + } +# endif + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_MichaelKVList_hp_erase_val + + Returns \p true if \p key is found and has been deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return erase_at( head(), key, intrusive_key_comparator() ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_hp_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( K const& key, Less pred ) + { + return erase_at( head(), key, typename options::template less_wrapper::type() ); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_MichaelKVList_hp_erase_func + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& val) { ... } + }; + \endcode + The functor may be passed by reference with boost:ref + + Return \p true if key is found and deleted, \p false otherwise + + See also: \ref erase + */ + template + bool erase( K const& key, Func f ) + { + return erase_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_hp_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. 
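+
+            A possible usage sketch; the \p std::string key type and the search by C-string are
+            assumptions for illustration:
+            \code
+            typedef cds::container::MichaelKVList< cds::gc::HP, std::string, int > str_map;
+
+            // Less predicate comparing the stored key with a C-string, in both argument orders
+            struct cstr_less {
+                bool operator()( std::string const& k, char const* s ) const { return k.compare( s ) < 0; }
+                bool operator()( char const* s, std::string const& k ) const { return k.compare( s ) > 0; }
+            };
+
+            struct erase_functor {
+                void operator()( str_map::value_type& item )
+                {
+                    // item.first is the key, item.second is the value of the item being erased
+                }
+            };
+
+            str_map theMap;
+            // ...
+            char const* key = "ten";
+            bool bErased = theMap.erase_with( key, cstr_less(), erase_functor() );
+            \endcode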
+ */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + return erase_at( head(), key, typename options::template less_wrapper::type(), f ); + } + + /// Extracts the item from the list with specified \p key + /** \anchor cds_nonintrusive_MichaelKVList_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the list, and returns it in \p dest parameter. + If the item with key equal to \p key is not found the function returns \p false. + + Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type. + + The \ref disposer specified in \p Traits class template parameter is called automatically + by garbage collector \p GC specified in class' template parameters when returned \ref guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::MichaelKVList< cds::gc::HP, int, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp; + theList.extract( gp, 5 ); + // Deal with gp + // ... + + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + bool extract( guarded_ptr& dest, K const& key ) + { + return extract_at( head(), dest.guard(), key, intrusive_key_comparator() ); + } + + /// Extracts the item from the list with comparing functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_hp_extract "extract(guarded_ptr&, K const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool extract_with( guarded_ptr& dest, K const& key, Less pred ) + { + return extract_at( head(), dest.guard(), key, typename options::template less_wrapper::type() ); + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_MichaelKVList_hp_find_val + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise + */ + template + bool find( Q const& key ) + { + return find_at( head(), key, intrusive_key_comparator() ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_hp_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& key, Less pred ) + { + return find_at( head(), key, typename options::template less_wrapper::type() ); + } + + /// Finds the key \p key and performs an action with it + /** \anchor cds_nonintrusive_MichaelKVList_hp_find_func + The function searches an item with key equal to \p key and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change item.second that is reference to value of node. + Note that the function is only guarantee that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. 
If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q const& key, Func f ) + { + return find_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_hp_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& key, Less pred, Func f ) + { + return find_at( head(), key, typename options::template less_wrapper::type(), f ); + } + + /// Finds the \p key and return the item found + /** \anchor cds_nonintrusive_MichaelKVList_hp_get + The function searches the item with key equal to \p key + and assigns the item found to guarded pointer \p ptr. + The function returns \p true if \p key is found, and \p false otherwise. + If \p key is not found the \p ptr parameter is not changed. + + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC automatically when returned \ref guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::MichaelKVList< cds::gc::HP, int, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp; + if ( theList.get( gp, 5 )) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p K that can be not the same as \p key_type. + */ + template + bool get( guarded_ptr& ptr, K const& key ) + { + return get_at( head(), ptr.guard(), key, intrusive_key_comparator() ); + } + + /// Finds the \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_hp_get "get( guarded_ptr& ptr, K const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool get_with( guarded_ptr& ptr, K const& key, Less pred ) + { + return get_at( head(), ptr.guard(), key, typename options::template less_wrapper::type() ); + } + + /// Checks if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. 
+ */ + size_t size() const + { + return base_class::size(); + } + + /// Clears the list + /** + Post-condition: the list is empty + */ + void clear() + { + base_class::clear(); + } + + protected: + //@cond + bool insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != null_ptr() ); + scoped_node_ptr p( pNode ); + if ( base_class::insert_at( refHead, *pNode )) { + p.release(); + return true; + } + return false; + } + + template + bool insert_at( head_type& refHead, const K& key ) + { + return insert_node_at( refHead, alloc_node( key )); + } + + template + bool insert_at( head_type& refHead, const K& key, const V& val ) + { + return insert_node_at( refHead, alloc_node( key, val )); + } + + template + bool insert_key_at( head_type& refHead, const K& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( base_class::insert_at( refHead, *pNode, [&f](node_type& node){ cds::unref(f)( node.m_Data ); })) +# else + insert_functor wrapper( f ); + if ( base_class::insert_at( refHead, *pNode, cds::ref(wrapper) )) +# endif + { + pNode.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace_at( head_type& refHead, K&& key, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(key), std::forward(args)... )); + } +# endif + + template + std::pair ensure_at( head_type& refHead, const K& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair ret = base_class::ensure_at( refHead, *pNode, + [&f]( bool bNew, node_type& node, node_type& ){ cds::unref(f)( bNew, node.m_Data ); }); +# else + ensure_functor wrapper( f ); + std::pair ret = base_class::ensure_at( refHead, *pNode, cds::ref(wrapper)); +# endif + if ( ret.first && ret.second ) + pNode.release(); + + return ret; + } + + template + bool erase_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::erase_at( refHead, key, cmp ); + } + + template + bool erase_at( head_type& refHead, K const& key, Compare cmp, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase_at( refHead, key, cmp, [&f]( node_type const & node ){ cds::unref(f)( const_cast(node.m_Data)); }); +# else + erase_functor wrapper( f ); + return base_class::erase_at( refHead, key, cmp, cds::ref(wrapper) ); +# endif + } + template + bool extract_at( head_type& refHead, typename gc::Guard& dest, K const& key, Compare cmp ) + { + return base_class::extract_at( refHead, dest, key, cmp ); + } + + template + bool find_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::find_at( refHead, key, cmp ); + } + + template + bool find_at( head_type& refHead, K& key, Compare cmp, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_at( refHead, key, cmp, [&f](node_type& node, K const&){ cds::unref(f)( node.m_Data ); }); +# else + find_functor wrapper( f ); + return base_class::find_at( refHead, key, cmp, cds::ref(wrapper) ); +# endif + } + + template + bool get_at( head_type& refHead, typename gc::Guard& guard, K const& key, Compare cmp ) + { + return base_class::get_at( refHead, guard, key, cmp ); + } + + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_MICHAEL_KVLIST_IMPL_H diff --git a/cds/container/michael_kvlist_nogc.h b/cds/container/michael_kvlist_nogc.h new file mode 100644 index 00000000..e814a3b7 --- /dev/null +++ b/cds/container/michael_kvlist_nogc.h @@ -0,0 +1,602 @@ 
+//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_KVLIST_NOGC_H +#define __CDS_CONTAINER_MICHAEL_KVLIST_NOGC_H + +#include +#include +#include +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + + template + struct make_michael_kvlist_nogc: public make_michael_kvlist + { + typedef make_michael_kvlist base_maker; + typedef typename base_maker::node_type node_type; + + struct type_traits: public base_maker::type_traits + { + typedef typename base_maker::node_deallocator disposer; + }; + + typedef intrusive::MichaelList type; + }; + + } // namespace details + //@endcond + + /// Michael's ordered list (key-value pair, template specialization for gc::nogc) + /** @ingroup cds_nonintrusive_list + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of list item. + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + See \ref cds_nonintrusive_MichaelList_gc "MichaelList" for description of template parameters. + + The interface of the specialization is a little different. + */ + template < + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = michael_list::type_traits +#else + typename Traits +#endif + > + class MichaelKVList: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::MichaelList< gc::nogc, implementation_defined, Traits > +#else + protected details::make_michael_kvlist_nogc< Key, Value, Traits >::type +#endif + { + //@cond + typedef details::make_michael_kvlist_nogc< Key, Value, Traits > options; + typedef typename options::type base_class; + //@endcond + + public: +#ifdef CDS_DOXYGEN_INVOKED + typedef Key key_type ; ///< Key type + typedef Value mapped_type ; ///< Type of value stored in the list + typedef std::pair value_type ; ///< key/value pair stored in the list +#else + typedef typename options::key_type key_type; + typedef typename options::value_type mapped_type; + typedef typename options::pair_type value_type; +#endif + + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename options::key_comparator key_comparator ; ///< key comparison functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename options::cxx_allocator cxx_allocator; + typedef typename options::node_deallocator node_deallocator; + typedef typename options::type_traits::compare intrusive_key_comparator; + + typedef typename base_class::atomic_node_ptr head_type; + //@endcond + + private: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct ensure_functor + { + node_type * m_pItemFound; + + ensure_functor() + : m_pItemFound( null_ptr() ) + {} + + void operator ()(bool, node_type& item, node_type& ) + { + m_pItemFound = &item; + } + }; + + template + class find_functor: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + find_functor( Func f ) + : base_class(f) + {} + + template + void operator ()( node_type& node, Q& ) + { + base_class::get()( node.m_Data ); + } + }; +# endif + //@endcond + + protected: + //@cond + template + static node_type * alloc_node(const K& key) + { + return cxx_allocator().New( key ); + } + + template + static node_type * alloc_node( const K& key, const V& val ) + { + return cxx_allocator().New( key, val ); + } + +#ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node( K&& key, Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward(key), std::forward(args)... ); + } +#endif + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + head_type& head() + { + return base_class::m_pHead; + } + + head_type const& head() const + { + return base_class::m_pHead; + } + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& refNode ) + : iterator_base( refNode ) + {} + + explicit iterator_type( const iterator_base& it ) + : iterator_base( it ) + {} + + friend class MichaelKVList; + + protected: + explicit iterator_type( node_type& pNode ) + : iterator_base( &pNode ) + {} + + public: + typedef typename cds::details::make_const_type::reference value_ref; + typedef typename cds::details::make_const_type::pointer value_ptr; + + typedef typename cds::details::make_const_type::reference pair_ref; + typedef typename cds::details::make_const_type::pointer pair_ptr; + + iterator_type() + : iterator_base() + {} + + iterator_type( const iterator_type& src ) + : iterator_base( src ) + {} + + key_type const& key() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != null_ptr() ); + return p->m_Data.first; + } + + value_ref val() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != null_ptr() ); + return p->m_Data.second; + } + + pair_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? 
&(p->m_Data) : null_ptr(); + } + + pair_ref operator *() const + { + typename iterator_base::value_ref p = iterator_base::operator *(); + return p.m_Data; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + /// Post-increment + iterator_type operator ++(int) + { + return iterator_base::operator ++(0); + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + /// Forward iterator + /** + The forward iterator for Michael's list based on gc::nogc has pre- and post-increment operators. + + The iterator interface to access item data: + - operator -> - returns a pointer to \ref value_type for iterator + - operator * - returns a reference (a const reference for \p const_iterator) to \ref value_type for iterator + - const key_type& key() - returns a key reference for iterator + - mapped_type& val() - retuns a value reference for iterator (const reference for \p const_iterator) + + For both functions the iterator should not be equal to end() + */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( head() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to NULL. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator addressing the first element in a list + //@{ + const_iterator begin() const + { + return const_iterator( head() ); + } + const_iterator cbegin() + { + return const_iterator( head() ); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a list + //@{ + const_iterator end() const + { + return const_iterator(); + } + const_iterator cend() + { + return const_iterator(); + } + //@} + + protected: + //@cond + iterator node_to_iterator( node_type * pNode ) + { + if ( pNode ) + return iterator( *pNode ); + return end(); + } + //@endcond + + public: + /// Default constructor + /** + Initialize empty list + */ + MichaelKVList() + {} + + /// List desctructor + /** + Clears the list + */ + ~MichaelKVList() + { + clear(); + } + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( const K& key ) + { + return node_to_iterator( insert_at( head(), key )); + } + + /// Inserts new node with a key and a value + /** + The function creates a node with \p key and value \p val, and then inserts the node created into the list. 
+ + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( const K& key, const V& val ) + { + // We cannot use insert with functor here + // because we cannot lock inserted node for updating + // Therefore, we use separate function + return node_to_iterator( insert_at( head(), key, val )); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code void func( value_type& item ); + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the list's item inserted. item.second is a reference to item's value that may be changed. + User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this list's item by concurrent threads. + The user-defined functor can be passed by reference using boost::ref + and it is called only if the inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the list; + - if inserting is successful, initialize the value of item by calling \p f functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert_key( const K& key, Func func ) + { + return node_to_iterator( insert_key_at( head(), key, func )); + } + + /// Ensures that the key \p key exists in the list + /** + The operation inserts new item if the key \p key is not found in the list. + Otherwise, the function returns an iterator that points to item found. + + Returns std::pair where \p first is an iterator pointing to + item found or inserted, \p second is true if new item has been added or \p false if the item + already is in the list. + */ + template + std::pair ensure( const K& key ) + { + std::pair< node_type *, bool > ret = ensure_at( head(), key ); + return std::make_pair( node_to_iterator( ret.first ), ret.second ); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref mapped_type constructed with std::forward(args)... + /** + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + iterator emplace( K&& key, Args&&... args ) + { + return node_to_iterator( emplace_at( head(), std::forward(key), std::forward(args)... 
)); + } +# endif + + /// Find the key \p key + /** \anchor cds_nonintrusive_MichaelKVList_nogc_find + + The function searches the item with key equal to \p key + and returns an iterator pointed to item found if the key is found, + and \ref end() otherwise + */ + template + iterator find( Q const& key ) + { + return node_to_iterator( find_at( head(), key, intrusive_key_comparator() ) ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_nogc_find "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + iterator find_with( Q const& key, Less pred ) + { + return node_to_iterator( find_at( head(), key, typename options::template less_wrapper::type() ) ); + } + + /// Check if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Clears the list + /** + Post-condition: the list is empty + */ + void clear() + { + base_class::clear(); + } + + protected: + //@cond + node_type * insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != null_ptr() ); + scoped_node_ptr p( pNode ); + if ( base_class::insert_at( refHead, *pNode )) + return p.release(); + return null_ptr(); + } + + template + node_type * insert_at( head_type& refHead, const K& key ) + { + return insert_node_at( refHead, alloc_node( key )); + } + + template + node_type * insert_at( head_type& refHead, const K& key, const V& val ) + { + return insert_node_at( refHead, alloc_node( key, val )); + } + + template + node_type * insert_key_at( head_type& refHead, const K& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + + if ( base_class::insert_at( refHead, *pNode )) { + cds::unref(f)( pNode->m_Data ); + return pNode.release(); + } + return null_ptr(); + } + + template + std::pair< node_type *, bool > ensure_at( head_type& refHead, const K& key ) + { + scoped_node_ptr pNode( alloc_node( key )); + node_type * pItemFound = null_ptr(); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair ret = base_class::ensure_at( refHead, *pNode, [&pItemFound](bool, node_type& item, node_type&){ pItemFound = &item; }); +# else + ensure_functor func; + std::pair ret = base_class::ensure_at( refHead, *pNode, boost::ref(func) ); + pItemFound = func.m_pItemFound; +# endif + assert( pItemFound != null_ptr() ); + + if ( ret.first && ret.second ) + pNode.release(); + return std::make_pair( pItemFound, ret.second ); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + node_type * emplace_at( head_type& refHead, K&& key, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(key), std::forward(args)... 
)); + } +#endif + + template + node_type * find_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::find_at( refHead, key, cmp ); + } + + /* + template + bool find_at( head_type& refHead, K& key, Compare cmp, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_at( refHead, key, cmp, [&f]( node_type& node, K const& ){ cds::unref(f)( node.m_Data ); }); +# else + find_functor wrapper( f ); + return base_class::find_at( refHead, key, cmp, cds::ref(wrapper) ); +# endif + } + */ + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_MICHAEL_KVLIST_NOGC_H diff --git a/cds/container/michael_kvlist_ptb.h b/cds/container/michael_kvlist_ptb.h new file mode 100644 index 00000000..04170ada --- /dev/null +++ b/cds/container/michael_kvlist_ptb.h @@ -0,0 +1,11 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_KVLIST_PTB_H +#define __CDS_CONTAINER_MICHAEL_KVLIST_PTB_H + +#include +#include +#include +#include + +#endif // #ifndef __CDS_CONTAINER_MICHAEL_KVLIST_PTB_H diff --git a/cds/container/michael_kvlist_rcu.h b/cds/container/michael_kvlist_rcu.h new file mode 100644 index 00000000..175df490 --- /dev/null +++ b/cds/container/michael_kvlist_rcu.h @@ -0,0 +1,929 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_KVLIST_RCU_H +#define __CDS_CONTAINER_MICHAEL_KVLIST_RCU_H + +#include +#include +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Michael's ordered list (key-value pair), template specialization for \ref cds_urcu_desc "RCU" + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_MichaelKVList_rcu + + This is key-value variation of non-intrusive \ref cds_nonintrusive_MichaelList_rcu "MichaelList". + Like standard container, this implementation split a value stored into two part - + constant key and alterable value. + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p Key - key type of an item stored in the list. It should be copy-constructible + - \p Value - value type stored in a list + - \p Traits - type traits, default is michael_list::type_traits + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + + It is possible to declare option-based list with cds::container::michael_list::make_traits metafunction istead of \p Traits template + argument. 
For example, the following traits-based declaration of Michael's list + \code + #include + #include + // Declare comparator for the item + struct my_compare { + int operator ()( int i1, int i2 ) + { + return i1 - i2; + } + }; + + // Declare type_traits + struct my_traits: public cds::container::michael_list::type_traits + { + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::container::MichaelKVList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, int, my_traits > traits_based_list; + \endcode + + is equivalent for the following option-based list + \code + #include + #include + + // my_compare is the same + + // Declare option-based list + typedef cds::container::MichaelKVList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, int, + typename cds::container::michael_list::make_traits< + cds::container::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + Template argument list \p Options of cds::container::michael_list::make_traits metafunction are: + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::allocator - the allocator used for creating and freeing list's item. Default is \ref CDS_DEFAULT_ALLOCATOR macro. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - opt::rcu_check_deadlock - a deadlock checking policy. Default is opt::v::rcu_throw_deadlock + */ + template < + typename RCU, + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = michael_list::type_traits +#else + typename Traits +#endif + > + class MichaelKVList< cds::urcu::gc, Key, Value, Traits >: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::MichaelList< cds::urcu::gc, implementation_defined, Traits > +#else + protected details::make_michael_kvlist< cds::urcu::gc, Key, Value, Traits >::type +#endif + { + //@cond + typedef details::make_michael_kvlist< cds::urcu::gc, Key, Value, Traits > options; + typedef typename options::type base_class; + //@endcond + + public: +#ifdef CDS_DOXYGEN_INVOKED + typedef Key key_type ; ///< Key type + typedef Value mapped_type ; ///< Type of value stored in the list + typedef std::pair value_type ; ///< key/value pair stored in the list +#else + typedef typename options::key_type key_type; + typedef typename options::value_type mapped_type; + typedef typename options::pair_type value_type; +#endif + + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename options::key_comparator key_comparator ; ///< key comparison functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename base_class::rcu_check_deadlock rcu_check_deadlock ; ///< RCU deadlock checking policy + + typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock + static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions require external locking + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename options::cxx_allocator cxx_allocator; + typedef typename options::node_deallocator node_deallocator; + typedef typename options::type_traits::compare intrusive_key_comparator; + + typedef typename base_class::atomic_node_ptr head_type; + //@endcond + + public: + /// pointer to extracted node + typedef cds::urcu::exempt_ptr< gc, node_type, value_type, typename options::type_traits::disposer, + cds::urcu::details::conventional_exempt_pair_cast + > exempt_ptr; + + private: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + template + class insert_functor: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + insert_functor ( Func f ) + : base_class( f ) + {} + + void operator()( node_type& node ) + { + base_class::get()( node.m_Data ); + } + }; + + template + class ensure_functor: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + ensure_functor( Func f ) + : base_class(f) + {} + + void operator ()( bool bNew, node_type& node, node_type& ) + { + base_class::get()( bNew, node.m_Data ); + } + }; + + template + class find_functor: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + find_functor( Func f ) + : base_class(f) + {} + + template + void operator ()( node_type& node, Q& ) + { + base_class::get()( node.m_Data ); + } + }; + + struct empty_find_functor + { + template + void operator ()( node_type& node, Q& val ) const + {} + }; + + template + struct erase_functor + { + Func m_func; + + erase_functor( Func f ) + : m_func( f ) + {} + + void operator ()( node_type const & node ) + { + cds::unref(m_func)( const_cast(node.m_Data) ); + } + }; +# endif // ifndef CDS_CXX11_LAMBDA_SUPPORT + //@endcond + + protected: + //@cond + template + static node_type * alloc_node(const K& key) + { + return cxx_allocator().New( key ); + } + + template + static node_type * alloc_node( const K& key, const V& val ) + { + return cxx_allocator().New( key, val ); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node( K&& key, Args&&... 
args ) + { + return cxx_allocator().MoveNew( std::forward(key), std::forward(args)...); + } +# endif + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + head_type& head() + { + return base_class::m_pHead; + } + + head_type& head() const + { + return const_cast( base_class::m_pHead ); + } + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( pNode ) + {} + + friend class MichaelKVList; + + public: + typedef typename cds::details::make_const_type::reference value_ref; + typedef typename cds::details::make_const_type::pointer value_ptr; + + typedef typename cds::details::make_const_type::reference pair_ref; + typedef typename cds::details::make_const_type::pointer pair_ptr; + + iterator_type() + {} + + iterator_type( iterator_type const& src ) + : iterator_base( src ) + {} + + key_type const& key() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != null_ptr() ); + return p->m_Data.first; + } + + pair_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? &(p->m_Data) : null_ptr(); + } + + pair_ref operator *() const + { + typename iterator_base::value_ref p = iterator_base::operator *(); + return p.m_Data; + } + + value_ref val() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != null_ptr() ); + return p->m_Data.second; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + /// Forward iterator + typedef iterator_type iterator; + + /// Const forward iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( head() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to NULL. + + The returned value can be used only to control reaching the end of the list. 
+ For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator addressing the first element in a list + //@{ + const_iterator begin() const + { + return const_iterator( head() ); + } + const_iterator cbegin() + { + return const_iterator( head() ); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a list + //@{ + const_iterator end() const + { + return const_iterator(); + } + const_iterator cend() + { + return const_iterator(); + } + //@} + + public: + /// Default constructor + /** + Initializes empty list + */ + MichaelKVList() + {} + + /// List desctructor + /** + Clears the list + */ + ~MichaelKVList() + { + clear(); + } + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + The function makes RCU lock internally. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( const K& key ) + { + return insert_at( head(), key ); + } + + /// Inserts new node with a key and a value + /** + The function creates a node with \p key and value \p val, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. + + The function makes RCU lock internally. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( const K& key, const V& val ) + { + return insert_at( head(), key, val ); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the list's item inserted. item.second is a reference to item's value that may be changed. + User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this list's item by concurrent threads. + The user-defined functor can be passed by reference using boost::ref + and it is called only if inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the list; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + The function makes RCU lock internally. + */ + template + bool insert_key( const K& key, Func func ) + { + return insert_key_at( head(), key, func ); + } + + /// Ensures that the \p key exists in the list + /** + The operation performs inserting or changing data with lock-free manner. 
+ + If the \p key not found in the list, then the new item created from \p key + is inserted into the list (note that in this case the \ref key_type should be + copy-constructible from type \p K). + Otherwise, the functor \p func is called with item found. + The functor \p Func may be a function with signature: + \code + void func( bool bNew, value_type& item ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + + The functor may change any fields of the \p item.second that is \ref mapped_type; + however, \p func must guarantee that during changing no any other modifications + could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + The function makes RCU lock internally. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair ensure( const K& key, Func f ) + { + return ensure_at( head(), key, f ); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref mapped_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + The function makes RCU lock internally. + + @note This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( K&& key, Args&&... args ) + { + return emplace_at( head(), std::forward(key), std::forward(args)... ); + } +# endif + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_MichaelKVList_rcu_erase + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns \p true if \p key is found and has been deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return erase_at( head(), key, intrusive_key_comparator() ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_rcu_erase "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( K const& key, Less pred ) + { + return erase_at( head(), key, typename options::template less_wrapper::type() ); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_MichaelKVList_rcu_erase_func + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& val) { ... } + }; + \endcode + The functor may be passed by reference with boost:ref + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + + See also: \ref erase + */ + template + bool erase( K const& key, Func f ) + { + return erase_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_rcu_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. 
+ \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + return erase_at( head(), key, typename options::template less_wrapper::type(), f ); + } + + /// Extracts an item from the list + /** + @anchor cds_nonintrusive_MichaelKVList_rcu_extract + The function searches an item with key equal to \p key in the list, + unlinks it from the list, and returns pointer to an item found in \p dest argument. + If \p key is not found the function returns \p false. + + @note The function does NOT call RCU read-side lock or synchronization, + and does NOT dispose the item found. It just excludes the item from the list + and returns a pointer to item found. + You should lock RCU before calling this function. + + \code + #include + #include + + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::container::MichaelKVList< rcu, int, Foo > rcu_michael_list; + + rcu_michael_list theList; + // ... + + rcu_michael_list::exempt_ptr p; + { + // first, we should lock RCU + rcu_michael_list::rcu_lock sl; + + // Now, you can apply extract function + // Note that you must not delete the item found inside the RCU lock + if ( theList.extract( p, 10 )) { + // do something with p + ... + } + } + // Outside RCU lock section we may safely release extracted pointer. + // release() passes the pointer to RCU reclamation cycle. + p.release(); + \endcode + */ + template + bool extract( exempt_ptr& dest, K const& key ) + { + dest = extract_at( head(), key, intrusive_key_comparator() ); + return !dest.empty(); + } + + /// Extracts an item from the list using \p pred predicate for searching + /** + This function is the analog for \ref cds_nonintrusive_MichaelKVList_rcu_extract "extract(exempt_ptr&, K const&)". + The \p pred is a predicate used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as \ref key_comparator. + */ + template + bool extract_with( exempt_ptr& dest, K const& key, Less pred ) + { + dest = extract_at( head(), key, typename options::template less_wrapper::type() ); + return !dest.empty(); + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_MichaelKVList_rcu_find_val + + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise + + The function makes RCU lock internally. + */ + template + bool find( Q const& key ) const + { + return find_at( head(), key, intrusive_key_comparator() ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_rcu_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& key, Less pred ) const + { + return find_at( head(), key, typename options::template less_wrapper::type() ); + } + + /// Finds the key \p key and performs an action with it + /** \anchor cds_nonintrusive_MichaelKVList_rcu_find_func + The function searches an item with key equal to \p key and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. 
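For example, a minimal sketch that increments the mapped value of the item found
(assuming the \p option_based_list typedef from the class description above, i.e.
an int -> int list; \p inc_value is an illustrative functor name):
\code
struct inc_value {
    void operator()( option_based_list::value_type& item )
    {
        ++item.second;  // update the mapped value of the found pair
    }
};

option_based_list theList;
theList.insert( 10, 0 );
theList.find( 10, inc_value() );    // the value mapped to key 10 becomes 1
\endcode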
+ + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change item.second that is reference to value of node. + Note that the function is only guarantee that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function makes RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q const& key, Func f ) const + { + return find_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_rcu_find_func "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& key, Less pred, Func f ) const + { + return find_at( head(), key, typename options::template less_wrapper::type(), f ); + } + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_MichaelKVList_rcu_get + The function searches the item with \p key and returns the pointer to item found. + If \p key is not found it returns \p NULL. + + Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::container::MichaelKVList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, foo, my_traits > ord_list; + ord_list theList; + // ... + { + // Lock RCU + ord_list::rcu_lock lock; + + ord_list::value_type * pVal = theList.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + // Unlock RCU by rcu_lock destructor + // pVal can be freed at any time after RCU has been unlocked + } + \endcode + */ + template + value_type * get( K const& key ) const + { + return get_at( head(), key, intrusive_key_comparator()); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_rcu_get "get(K const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + value_type * get_with( K const& key, Less pred ) const + { + return get_at( head(), key, typename options::template less_wrapper::type()); + } + + /// Checks if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. 
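For example, real item counting can be enabled through the traits (a sketch extending
the option-based declaration from the class description; cds::atomicity::item_counter
is assumed as the counting policy):
\code
typedef cds::container::MichaelKVList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, int,
    typename cds::container::michael_list::make_traits<
        cds::container::opt::compare< my_compare >
        ,cds::container::opt::item_counter< cds::atomicity::item_counter >
    >::type
> counted_list;
\endcode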
+ */ + size_t size() const + { + return base_class::size(); + } + + /// Clears the list + /** + Post-condition: the list is empty + */ + void clear() + { + base_class::clear(); + } + + protected: + //@cond + bool insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != null_ptr() ); + scoped_node_ptr p( pNode ); + if ( base_class::insert_at( refHead, *pNode )) { + p.release(); + return true; + } + return false; + } + + template + bool insert_at( head_type& refHead, const K& key ) + { + return insert_node_at( refHead, alloc_node( key )); + } + + template + bool insert_at( head_type& refHead, const K& key, const V& val ) + { + return insert_node_at( refHead, alloc_node( key, val )); + } + + template + bool insert_key_at( head_type& refHead, const K& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( base_class::insert_at( refHead, *pNode, [&f](node_type& node){ cds::unref(f)( node.m_Data ); })) +# else + insert_functor wrapper( f ); + if ( base_class::insert_at( refHead, *pNode, cds::ref(wrapper) )) +# endif + { + pNode.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace_at( head_type& refHead, K&& key, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(key), std::forward(args)... )); + } +# endif + + template + std::pair ensure_at( head_type& refHead, const K& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair ret = base_class::ensure_at( refHead, *pNode, + [&f]( bool bNew, node_type& node, node_type& ){ cds::unref(f)( bNew, node.m_Data ); }); +# else + ensure_functor wrapper( f ); + std::pair ret = base_class::ensure_at( refHead, *pNode, cds::ref(wrapper)); +# endif + if ( ret.first && ret.second ) + pNode.release(); + + return ret; + } + + template + bool erase_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::erase_at( refHead, key, cmp ); + } + + template + bool erase_at( head_type& refHead, K const& key, Compare cmp, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase_at( refHead, key, cmp, [&f]( node_type const & node ){ cds::unref(f)( const_cast(node.m_Data)); }); +# else + erase_functor wrapper( f ); + return base_class::erase_at( refHead, key, cmp, cds::ref(wrapper) ); +# endif + } + + template + node_type * extract_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::extract_at( refHead, key, cmp ); + } + + template + bool find_at( head_type& refHead, K const& key, Compare cmp ) const + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_at( refHead, key, cmp, [](node_type&, K const&) {} ); +# else + return base_class::find_at( refHead, key, cmp, empty_find_functor() ); +# endif + } + + template + bool find_at( head_type& refHead, K& key, Compare cmp, Func f ) const + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_at( refHead, key, cmp, [&f](node_type& node, K const&){ cds::unref(f)( node.m_Data ); }); +# else + find_functor wrapper( f ); + return base_class::find_at( refHead, key, cmp, cds::ref(wrapper) ); +# endif + } + + template + value_type * get_at( head_type& refHead, K const& val, Compare cmp ) const + { + node_type * pNode = base_class::get_at( refHead, val, cmp ); + return pNode ? 
&pNode->m_Data : null_ptr(); + } + + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_MICHAEL_KVLIST_RCU_H diff --git a/cds/container/michael_list_base.h b/cds/container/michael_list_base.h new file mode 100644 index 00000000..78099ceb --- /dev/null +++ b/cds/container/michael_list_base.h @@ -0,0 +1,112 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_LIST_BASE_H +#define __CDS_CONTAINER_MICHAEL_LIST_BASE_H + +#include +#include +#include + +namespace cds { namespace container { + + /// MichaelList ordered list related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace michael_list { + /// Michael list default type traits + struct type_traits + { + typedef CDS_DEFAULT_ALLOCATOR allocator ; ///< allocator used to allocate new node + + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// specifies binary predicate used for key comparison. + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// back-off strategy used + /** + If the option is not specified, the cds::backoff::empty is used. + */ + typedef cds::backoff::empty back_off; + + /// Item counter + /** + The type for item counting feature. + Default is no item counter (\ref atomicity::empty_item_counter) + */ + typedef atomicity::empty_item_counter item_counter; + + /// Link fields checking feature + /** + Default is \ref intrusive::opt::debug_check_link + */ + static const opt::link_check_type link_checker = opt::debug_check_link; + + /// C++ memory ordering model + /** + List of available memory ordering see opt::memory_model + */ + typedef opt::v::relaxed_ordering memory_model; + + /// RCU deadlock checking policy (only for \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList") + /** + List of available options see opt::rcu_check_deadlock + */ + typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; + + //@cond + // MichaelKVList: supporting for split-ordered list + // key accessor (opt::none = internal key type is equal to user key type) + typedef opt::none key_accessor; + //@endcond + }; + + /// Metafunction converting option list to MichaelList traits + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + + See \ref MichaelList, \ref type_traits, \ref cds::opt::make_options. + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS9 >::type + ,CDS_OPTIONS9 + >::type type; +#endif + }; + + + } // namespace michael_list + + // Forward declarations + template + class MichaelList; + + template + class MichaelKVList; + + // Tag for selecting Michael's list implementation + /** + This struct is empty and it is used only as a tag for selecting MichaelList + as ordered list implementation in declaration of some classes. + + See split_list::type_traits::ordered_list as an example. 
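A sketch of such a selection (hypothetical user-defined traits following the
split_list convention referenced above):
\code
struct my_split_list_traits: public cds::container::split_list::type_traits
{
    // use MichaelList as the ordered-list implementation for the buckets
    typedef cds::container::michael_list_tag ordered_list;
};
\endcode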
+ */ + struct michael_list_tag + {}; + +}} // namespace cds::container + + +#endif // #ifndef __CDS_CONTAINER_MICHAEL_LIST_BASE_H diff --git a/cds/container/michael_list_hp.h b/cds/container/michael_list_hp.h new file mode 100644 index 00000000..f1ae1a8d --- /dev/null +++ b/cds/container/michael_list_hp.h @@ -0,0 +1,11 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_LIST_HP_H +#define __CDS_CONTAINER_MICHAEL_LIST_HP_H + +#include +#include +#include +#include + +#endif // #ifndef __CDS_CONTAINER_MICHAEL_LIST_HP_H diff --git a/cds/container/michael_list_hrc.h b/cds/container/michael_list_hrc.h new file mode 100644 index 00000000..01cb993b --- /dev/null +++ b/cds/container/michael_list_hrc.h @@ -0,0 +1,11 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_LIST_HRC_H +#define __CDS_CONTAINER_MICHAEL_LIST_HRC_H + +#include +#include +#include +#include + +#endif // #ifndef __CDS_CONTAINER_MICHAEL_LIST_HRC_H diff --git a/cds/container/michael_list_impl.h b/cds/container/michael_list_impl.h new file mode 100644 index 00000000..739bbc58 --- /dev/null +++ b/cds/container/michael_list_impl.h @@ -0,0 +1,935 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_LIST_IMPL_H +#define __CDS_CONTAINER_MICHAEL_LIST_IMPL_H + +#include +#include + +namespace cds { namespace container { + + /// Michael's ordered list + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_MichaelList_gc + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Source: + - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" + + This class is non-intrusive version of cds::intrusive::MichaelList class + + Template arguments: + - \p GC - garbage collector used + - \p T - type stored in the list. The type must be default- and copy-constructible. + - \p Traits - type traits, default is michael_list::type_traits + + Unlike standard container, this implementation does not divide type \p T into key and value part and + may be used as a main building block for hash set algorithms. + The key is a function (or a part) of type \p T, and this function is specified by Traits::compare functor + or Traits::less predicate + + MichaelKVList is a key-value version of Michael's non-intrusive list that is closer to the C++ std library approach. + + It is possible to declare option-based list with cds::container::michael_list::make_traits metafunction istead of \p Traits template + argument. For example, the following traits-based declaration of gc::HP Michael's list + \code + #include + // Declare comparator for the item + struct my_compare { + int operator ()( int i1, int i2 ) + { + return i1 - i2; + } + }; + + // Declare type_traits + struct my_traits: public cds::container::michael_list::type_traits + { + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::container::MichaelList< cds::gc::HP, int, my_traits > traits_based_list; + \endcode + + is equivalent for the following option-based list + \code + #include + + // my_compare is the same + + // Declare option-based list + typedef cds::container::MichaelList< cds::gc::HP, int, + typename cds::container::michael_list::make_traits< + cds::container::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + Template argument list \p Options of cds::container::michael_list::make_traits metafunction are: + - opt::compare - key comparison functor. No default functor is provided. 
+ If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::allocator - the allocator used for creating and freeing list's item. Default is \ref CDS_DEFAULT_ALLOCATOR macro. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + + \par Usage + There are different specializations of this template for each garbage collecting schema used. + You should include appropriate .h-file depending on GC you are using: + - for gc::HP: \code #include \endcode + - for gc::PTB: \code #include \endcode + - for gc::HRC: \code #include \endcode + - for \ref cds_urcu_desc "RCU": \code #include \endcode + - for gc::nogc: \code #include \endcode + */ + template < + typename GC, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = michael_list::type_traits +#else + typename Traits +#endif + > + class MichaelList: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::MichaelList< GC, T, Traits > +#else + protected details::make_michael_list< GC, T, Traits >::type +#endif + { + //@cond + typedef details::make_michael_list< GC, T, Traits > options; + typedef typename options::type base_class; + //@endcond + + public: + typedef T value_type ; ///< Type of value stored in the list + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename options::key_comparator key_comparator ; ///< key comparison functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename options::cxx_allocator cxx_allocator; + typedef typename options::node_deallocator node_deallocator; + typedef typename options::type_traits::compare intrusive_key_comparator; + + typedef typename base_class::atomic_node_ptr head_type; +# ifndef CDS_CXX11_LAMBDA_SUPPORT + typedef typename base_class::empty_erase_functor empty_erase_functor; +# endif + //@endcond + + public: + /// Guarded pointer + typedef cds::gc::guarded_ptr< gc, node_type, value_type, details::guarded_ptr_cast_set > guarded_ptr; + + private: + //@cond + static value_type& node_to_value( node_type& n ) + { + return n.m_Value; + } + static value_type const& node_to_value( node_type const& n ) + { + return n.m_Value; + } + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + template + struct insert_functor + { + Func m_func; + + insert_functor ( Func f ) + : m_func(f) + {} + + void operator()( node_type& node ) + { + cds::unref(m_func)( node_to_value(node) ); + } + }; + + template + struct ensure_functor + { + Func m_func; + Q const& m_arg; + + ensure_functor( Q const& arg, Func f ) + : m_func(f) + , m_arg( arg ) + {} + + void operator ()( bool bNew, node_type& node, node_type& ) + { + cds::unref(m_func)( bNew, node_to_value(node), m_arg ); + } + }; + + template + struct find_functor + { + Func m_func; + + find_functor( Func f ) + : m_func(f) + {} + + template + void operator ()( node_type& node, Q& val ) + { + cds::unref(m_func)( node_to_value(node), val ); + } + }; + + template + struct erase_functor + { + Func m_func; + + erase_functor( Func f ) + : m_func(f) + {} + + void operator()( node_type const& node ) + { + cds::unref(m_func)( node_to_value(node) ); + } + }; +#endif // ifndef CDS_CXX11_LAMBDA_SUPPORT + //@endcond + + protected: + //@cond + template + static node_type * alloc_node( Q const& v ) + { + return cxx_allocator().New( v ); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward(args)... ); + } +# endif + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + head_type& head() + { + return base_class::m_pHead; + } + + head_type const& head() const + { + return base_class::m_pHead; + } + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( pNode ) + {} + + friend class MichaelList; + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + {} + + iterator_type( iterator_type const& src ) + : iterator_base( src ) + {} + + value_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? 
&(p->m_Value) : reinterpret_cast(NULL); + } + + value_ref operator *() const + { + return (iterator_base::operator *()).m_Value; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + /// Forward iterator + /** + The forward iterator for Michael's list has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. + For some GC (gc::HP, gc::HRC), a guard is limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if a limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary since it contains GC's guard that is thread-private GC data. + - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the list. + + Therefore, the use of iterators in concurrent environment is not good idea. Use the iterator on the concurrent container + for debug purpose only. + */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( head() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to NULL. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator addressing the first element in a list + //@{ + const_iterator begin() const + { + return const_iterator( head() ); + } + const_iterator cbegin() + { + return const_iterator( head() ); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a list + //@{ + const_iterator end() const + { + return const_iterator(); + } + const_iterator cend() + { + return const_iterator(); + } + //@} + + public: + /// Default constructor + /** + Initialize empty list + */ + MichaelList() + {} + + /// List destructor + /** + Clears the list + */ + ~MichaelList() + { + clear(); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the list. + + The type \p Q should contain as minimum the complete key of the node. + The object of \ref value_type should be constructible from \p val of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + Returns \p true if inserting successful, \p false otherwise. 
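For example (a sketch using the \p traits_based_list typedef from the class description above):
\code
traits_based_list theList;
theList.insert( 5 );    // returns true  - the value is new
theList.insert( 5 );    // returns false - an equal item is already in the list
\endcode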
+ */ + template + bool insert( Q const& val ) + { + return insert_at( head(), val ); + } + + /// Inserts new node + /** + This function inserts new node with default-constructed value and then it calls + \p func functor with signature + \code void func( value_type& itemValue ) ;\endcode + + The argument \p itemValue of user-defined functor \p func is the reference + to the list's item inserted. User-defined functor \p func should guarantee that during changing + item's value no any other changes could be made on this list's item by concurrent threads. + The user-defined functor can be passed by reference using boost::ref + and it is called only if the inserting is success. + + The type \p Q should contain the complete key of the node. + The object of \ref value_type should be constructible from \p key of type \p Q. + + The function allows to split creating of new item into two part: + - create item from \p key with initializing key-fields only; + - insert new item into the list; + - if inserting is successful, initialize non-key fields of item by calling \p f functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + */ + template + bool insert( Q const& key, Func func ) + { + return insert_at( head(), key, func ); + } + + /// Ensures that the \p key exists in the list + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p key not found in the list, then the new item created from \p key + is inserted into the list. Otherwise, the functor \p func is called with the item found. + The functor \p Func should be a function with signature: + \code + void func( bool bNew, value_type& item, const Q& val ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p key passed into the \p ensure function + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair ensure( Q const& key, Func f ) + { + return ensure_at( head(), key, f ); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + return emplace_at( head(), std::forward(args)... ); + } +# endif + + /// Delete \p key from the list + /** \anchor cds_nonintrusive_MichealList_hp_erase_val + Since the key of MichaelList's item type \p T is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. 
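For example (again using the \p traits_based_list typedef from the class description):
\code
traits_based_list theList;
// ...
theList.erase( 5 );     // deletes the item with key 5, if it is present
\endcode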
+ + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_at( head(), key, intrusive_key_comparator(), [](value_type const&){} ); +# else + return erase_at( head(), key, intrusive_key_comparator(), empty_erase_functor() ); +# endif + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichealList_hp_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& key, Less pred ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_at( head(), key, typename options::template less_wrapper::type(), [](value_type const&){} ); +# else + return erase_at( head(), key, typename options::template less_wrapper::type(), empty_erase_functor() ); +# endif + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_MichaelList_hp_erase_func + The function searches an item with key \p key, calls \p f functor with item found + and deletes it. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(const value_type& val) { ... } + }; + \endcode + The functor may be passed by reference with boost:ref + + Since the key of MichaelList's item type \p T is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + + See also: \ref erase + */ + template + bool erase( Q const& key, Func f ) + { + return erase_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_hp_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + return erase_at( head(), key, typename options::template less_wrapper::type(), f ); + } + + /// Extracts the item from the list with specified \p key + /** \anchor cds_nonintrusive_MichaelList_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the list, and returns it in \p dest parameter. + If the item with key equal to \p key is not found the function returns \p false. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::MichaelList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp; + theList.extract( gp, 5 ); + // Deal with gp + // ... 
+ + // Destructor of gp releases internal HP guard and frees the item + } + \endcode + */ + template + bool extract( guarded_ptr& dest, Q const& key ) + { + return extract_at( head(), dest.guard(), key, intrusive_key_comparator() ); + } + + /// Extracts the item from the list with comparing functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_hp_extract "extract(guarded_ptr&, Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool extract_with( guarded_ptr& dest, Q const& key, Less pred ) + { + return extract_at( head(), dest.guard(), key, typename options::template less_wrapper::type() ); + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_MichaelList_hp_find_val + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise + */ + template + bool find( Q const& key ) + { + return find_at( head(), key, intrusive_key_comparator() ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_hp_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& key, Less pred ) + { + return find_at( head(), key, typename options::template less_wrapper::type() ); + } + + /// Find the key \p val and perform an action with it + /** \anchor cds_nonintrusive_MichaelList_hp_find_func + The function searches an item with key equal to \p val and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the function is only guarantee + that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return find_at( head(), val, intrusive_key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_hp_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. 
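As a concrete illustration of the find-with-functor contract above, here is a small sketch (not part of the patch). It reuses the hypothetical foo / foo_list types from the earlier sketch; the functor only reads the item and copies data out through the non-const search argument, since the list does not serialize concurrent access to the item.

\code
void read_payload( foo_list& lst )
{
    foo what( 5 );      // carries the key in, receives data out

    bool bFound = lst.find( what, []( foo& item, foo& val ) {
        val.payload = item.payload;   // copy a non-key field into the search argument
    });
    // if bFound is true, what.payload now holds the payload of the item with key 5
}
\endcode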
+ */ + template + bool find_with( Q& val, Less pred, Func f ) + { + return find_at( head(), val, typename options::template less_wrapper::type(), f ); + } + + /// Find the key \p val and perform an action with it + /** \anchor cds_nonintrusive_MichaelList_hp_find_cfunc + The function searches an item with key equal to \p val and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the function is only guarantee + that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return find_at( head(), val, intrusive_key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_hp_find_cfunc "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& val, Less pred, Func f ) + { + return find_at( head(), val, typename options::template less_wrapper::type(), f ); + } + + /// Finds the key \p val and return the item found + /** \anchor cds_nonintrusive_MichaelList_hp_get + The function searches the item with key equal to \p val + and assigns the item found to guarded pointer \p ptr. + The function returns \p true if \p val is found, and \p false otherwise. + If \p val is not found the \p ptr parameter is not changed. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::MichaelList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp; + if ( theList.get( gp, 5 )) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard and frees the item + } + \endcode + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool get( guarded_ptr& ptr, Q const& val ) + { + return get_at( head(), ptr.guard(), val, intrusive_key_comparator() ); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_hp_get "get( guarded_ptr& ptr, Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. 
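The erase_with() / find_with() / extract_with() family above requires a Less predicate that can compare a list item with the search key in either argument order and that induces the same ordering as the list comparator. A hypothetical predicate for the foo type from the earlier sketches, keyed by a plain int, could look like this (illustrative only, not part of the patch):

\code
struct foo_int_less {
    bool operator()( foo const& lhs, int rhs ) const { return lhs.nKey < rhs; }
    bool operator()( int lhs, foo const& rhs ) const { return lhs < rhs.nKey; }
    bool operator()( foo const& lhs, foo const& rhs ) const { return lhs.nKey < rhs.nKey; }
};

void erase_and_find( foo_list& lst )
{
    bool bErased = lst.erase_with( 5, foo_int_less() );   // key passed as a plain int
    bool bFound  = lst.find_with( 10, foo_int_less() );
}
\endcode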
+ */ + template + bool get_with( guarded_ptr& ptr, Q const& val, Less pred ) + { + return get_at( head(), ptr.guard(), val, typename options::template less_wrapper::type() ); + } + + /// Check if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomics::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Clears the list + /** + Post-condition: the list is empty + */ + void clear() + { + base_class::clear(); + } + + protected: + //@cond + bool insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != NULL ); + scoped_node_ptr p(pNode); + if ( base_class::insert_at( refHead, *pNode )) { + p.release(); + return true; + } + + return false; + } + + template + bool insert_at( head_type& refHead, Q const& val ) + { + return insert_node_at( refHead, alloc_node( val )); + } + + template + bool insert_at( head_type& refHead, Q const& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT +# ifdef CDS_BUG_STATIC_MEMBER_IN_LAMBDA + // GCC 4.5,4.6,4.7: node_to_value is unaccessible from lambda, + // like as MichaelList::node_to_value that requires to capture *this* despite on node_to_value is static function + value_type& (* n2v)( node_type& ) = node_to_value; + if ( base_class::insert_at( refHead, *pNode, [&f, n2v]( node_type& node ) { cds::unref(f)( n2v(node) ); } )) +# else + if ( base_class::insert_at( refHead, *pNode, [&f]( node_type& node ) { cds::unref(f)( node_to_value(node) ); } )) +# endif +# else + insert_functor wrapper( f ); + if ( base_class::insert_at( refHead, *pNode, cds::ref(wrapper) )) +# endif + { + pNode.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace_at( head_type& refHead, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(args) ... 
)); + } +# endif + + template + bool erase_at( head_type& refHead, Q const& key, Compare cmp, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT +# ifdef CDS_BUG_STATIC_MEMBER_IN_LAMBDA + // GCC 4.5-4.7: node_to_value is unaccessible from lambda, + // like as MichaelList::node_to_value that requires to capture *this* despite on node_to_value is static function + value_type const& (* n2v)( node_type const& ) = node_to_value; + return base_class::erase_at( refHead, key, cmp, [&f,n2v](node_type const& node){ cds::unref(f)( n2v(node) ); } ); +# else + return base_class::erase_at( refHead, key, cmp, [&f](node_type const& node){ cds::unref(f)( node_to_value(node) ); } ); +# endif +# else + erase_functor wrapper( f ); + return base_class::erase_at( refHead, key, cmp, cds::ref(wrapper) ); +# endif + } + + template + bool extract_at( head_type& refHead, typename gc::Guard& dest, Q const& key, Compare cmp ) + { + return base_class::extract_at( refHead, dest, key, cmp ); + } + + template + std::pair ensure_at( head_type& refHead, Q const& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT +# ifdef CDS_BUG_STATIC_MEMBER_IN_LAMBDA + // GCC 4.5-4.7: node_to_value is unaccessible from lambda, + // like as MichaelList::node_to_value that requires to capture *this* despite on node_to_value is static function + value_type& (* n2v)( node_type& ) = node_to_value; + std::pair ret = base_class::ensure_at( refHead, *pNode, + [&f, &key, n2v](bool bNew, node_type& node, node_type&){ cds::unref(f)( bNew, n2v(node), key ); }); +# else + std::pair ret = base_class::ensure_at( refHead, *pNode, + [&f, &key](bool bNew, node_type& node, node_type&){ cds::unref(f)( bNew, node_to_value(node), key ); }); +# endif +# else + ensure_functor wrapper( key, f ); + std::pair ret = base_class::ensure_at( refHead, *pNode, cds::ref(wrapper)); +# endif + if ( ret.first && ret.second ) + pNode.release(); + + return ret; + } + + template + bool find_at( head_type& refHead, Q const& key, Compare cmp ) + { + return base_class::find_at( refHead, key, cmp ); + } + + template + bool find_at( head_type& refHead, Q& val, Compare cmp, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT +# ifdef CDS_BUG_STATIC_MEMBER_IN_LAMBDA + // GCC 4.5-4.7: node_to_value is unaccessible from lambda, + // like as MichaelList::node_to_value that requires to capture *this* despite on node_to_value is static function + value_type& (* n2v)( node_type& ) = node_to_value; + return base_class::find_at( refHead, val, cmp, [&f, n2v](node_type& node, Q& v){ cds::unref(f)( n2v(node), v ); }); +# else + return base_class::find_at( refHead, val, cmp, [&f](node_type& node, Q& v){ cds::unref(f)( node_to_value(node), v ); }); +# endif +# else + find_functor wrapper( f ); + return base_class::find_at( refHead, val, cmp, cds::ref(wrapper) ); +# endif + } + + template + bool get_at( head_type& refHead, typename gc::Guard& guard, Q const& key, Compare cmp ) + { + return base_class::get_at( refHead, guard, key, cmp ); + } + + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_MICHAEL_LIST_IMPL_H diff --git a/cds/container/michael_list_nogc.h b/cds/container/michael_list_nogc.h new file mode 100644 index 00000000..bd07cff4 --- /dev/null +++ b/cds/container/michael_list_nogc.h @@ -0,0 +1,450 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_LIST_NOGC_H +#define __CDS_CONTAINER_MICHAEL_LIST_NOGC_H + +#include +#include +#include +#include + +namespace cds { namespace container { + + //@cond + namespace 
details { + + template + struct make_michael_list_nogc: public make_michael_list + { + typedef make_michael_list base_maker; + typedef typename base_maker::node_type node_type; + + struct type_traits: public base_maker::type_traits + { + typedef typename base_maker::node_deallocator disposer; + }; + + typedef intrusive::MichaelList type; + }; + + } // namespace details + //@endcond + + /// Michael's lock-free ordered single-linked list (template specialization for gc::nogc) + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_MichaelList_nogc + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of list item. + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + See \ref cds_nonintrusive_MichaelList_gc "MichaelList" for description of template parameters. + + The interface of the specialization is a little different. + */ + template + class MichaelList: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::MichaelList< gc::nogc, T, Traits > +#else + protected details::make_michael_list_nogc< T, Traits >::type +#endif + { + //@cond + typedef details::make_michael_list_nogc< T, Traits > options; + typedef typename options::type base_class; + //@endcond + + public: + typedef T value_type ; ///< Type of value stored in the list + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename options::key_comparator key_comparator ; ///< key comparison functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename options::cxx_allocator cxx_allocator; + typedef typename options::node_deallocator node_deallocator; + typedef typename options::type_traits::compare intrusive_key_comparator; + + typedef typename base_class::atomic_node_ptr head_type; + //@endcond + + protected: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct ensure_functor + { + node_type * m_pItemFound; + + ensure_functor() + : m_pItemFound( NULL ) + {} + + void operator ()(bool, node_type& item, node_type& ) + { + m_pItemFound = &item; + } + }; +# endif + //@endcond + + protected: + //@cond + static node_type * alloc_node() + { + return cxx_allocator().New(); + } + + static node_type * alloc_node( value_type const& v ) + { + return cxx_allocator().New( v ); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward(args)... 
); + } +# endif + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + head_type& head() + { + return base_class::m_pHead; + } + + head_type const& head() const + { + return base_class::m_pHead; + } + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( pNode ) + {} + + explicit iterator_type( const iterator_base& it ) + : iterator_base( it ) + {} + + friend class MichaelList; + + protected: + explicit iterator_type( node_type& pNode ) + : iterator_base( &pNode ) + {} + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + {} + + iterator_type( const iterator_type& src ) + : iterator_base( src ) + {} + + value_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? &(p->m_Value) : reinterpret_cast(NULL); + } + + value_ref operator *() const + { + return (iterator_base::operator *()).m_Value; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + /// Post-increment + iterator_type operator ++(int) + { + return iterator_base::operator ++(0); + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( head() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to NULL. + + The returned value can be used only to control reaching the end of the list. 
+ For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator addressing the first element in a list + //@{ + const_iterator begin() const + { + return const_iterator( head() ); + } + const_iterator cbegin() + { + return const_iterator( head() ); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a list + //@{ + const_iterator end() const + { + return const_iterator(); + } + const_iterator cend() + { + return const_iterator(); + } + //@} + + protected: + //@cond + iterator node_to_iterator( node_type * pNode ) + { + if ( pNode ) + return iterator( *pNode ); + return end(); + } + //@endcond + + public: + /// Default constructor + /** + Initialize empty list + */ + MichaelList() + {} + + /// List desctructor + /** + Clears the list + */ + ~MichaelList() + { + clear(); + } + + /// Inserts new node + /** + The function inserts \p val in the list if the list does not contain + an item with key equal to \p val. + + Return an iterator pointing to inserted item if success \ref end() otherwise + */ + template + iterator insert( const Q& val ) + { + return node_to_iterator( insert_at( head(), val ) ); + } + + /// Ensures that the item \p val exists in the list + /** + The operation inserts new item if the key \p val is not found in the list. + Otherwise, the function returns an iterator that points to item found. + + Returns std::pair where \p first is an iterator pointing to + item found or inserted, \p second is true if new item has been added or \p false if the item + already is in the list. + */ + template + std::pair ensure( const Q& val ) + { + std::pair< node_type *, bool > ret = ensure_at( head(), val ); + return std::make_pair( node_to_iterator( ret.first ), ret.second ); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Return an iterator pointing to inserted item if success \ref end() otherwise + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + iterator emplace( Args&&... args ) + { + return node_to_iterator( emplace_at( head(), std::forward(args)... )); + } +# endif + + /// Find the key \p val + /** \anchor cds_nonintrusive_MichaelList_nogc_find_val + The function searches the item with key equal to \p val + and returns an iterator pointed to item found if the key is found, + and \ref end() otherwise + */ + template + iterator find( Q const& key ) + { + return node_to_iterator( find_at( head(), key, intrusive_key_comparator() )); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_nogc_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + iterator find_with( Q const& key, Less pred ) + { + return node_to_iterator( find_at( head(), key, typename options::template less_wrapper::type() )); + } + + /// Check if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomics::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. 
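For the gc::nogc (persistent) specialization implemented in this file, the interface differs from the HP-based list: insert(), ensure() and find() work in terms of iterators and there is no erase(). A minimal sketch under those assumptions (int payload, std::less ordering; not part of the patch):

\code
#include <cds/container/michael_list_nogc.h>
#include <functional>

struct int_traits : public cds::container::michael_list::type_traits {
    typedef std::less<int> less;
};
typedef cds::container::MichaelList< cds::gc::nogc, int, int_traits > persistent_list;

void nogc_example()
{
    persistent_list lst;

    // insert() returns an iterator to the inserted item, or end() on failure
    persistent_list::iterator it = lst.insert( 10 );

    // ensure() returns (iterator to the item found or inserted, true if it was inserted)
    std::pair< persistent_list::iterator, bool > res = lst.ensure( 10 );

    // find() returns an iterator, end() if the key is not in the list.
    // There is no erase(): items are never reclaimed in nogc mode.
    if ( lst.find( 10 ) != lst.end() ) {
        // the item stays valid for the lifetime of the list
    }
}
\endcode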
To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Clears the list + /** + Post-condition: the list is empty + */ + void clear() + { + base_class::clear(); + } + + protected: + //@cond + node_type * insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != null_ptr() ); + scoped_node_ptr p(pNode); + if ( base_class::insert_at( refHead, *pNode )) + return p.release(); + + return null_ptr(); + } + + template + node_type * insert_at( head_type& refHead, const Q& val ) + { + return insert_node_at( refHead, alloc_node( val )); + } + + template + std::pair< node_type *, bool > ensure_at( head_type& refHead, const Q& val ) + { + scoped_node_ptr pNode( alloc_node( val )); + node_type * pItemFound = null_ptr(); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair ret = base_class::ensure_at( refHead, *pNode, [&pItemFound](bool, node_type& item, node_type&) { pItemFound = &item; }); +# else + ensure_functor func; + std::pair ret = base_class::ensure_at( refHead, *pNode, boost::ref(func) ); + pItemFound = func.m_pItemFound; +# endif + assert( pItemFound != null_ptr() ); + + if ( ret.first && ret.second ) + pNode.release(); + return std::make_pair( pItemFound, ret.second ); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + node_type * emplace_at( head_type& refHead, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(args)...)); + } +# endif + + template + node_type * find_at( head_type& refHead, Q const& key, Compare cmp ) + { + return base_class::find_at( refHead, key, cmp ); + } + + //@endcond + }; +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_MICHAEL_LIST_NOGC_H diff --git a/cds/container/michael_list_ptb.h b/cds/container/michael_list_ptb.h new file mode 100644 index 00000000..b1f3bfbf --- /dev/null +++ b/cds/container/michael_list_ptb.h @@ -0,0 +1,11 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_LIST_PTB_H +#define __CDS_CONTAINER_MICHAEL_LIST_PTB_H + +#include +#include +#include +#include + +#endif // #ifndef __CDS_CONTAINER_MICHAEL_LIST_PTB_H diff --git a/cds/container/michael_list_rcu.h b/cds/container/michael_list_rcu.h new file mode 100644 index 00000000..f16a487f --- /dev/null +++ b/cds/container/michael_list_rcu.h @@ -0,0 +1,967 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_LIST_RCU_H +#define __CDS_CONTAINER_MICHAEL_LIST_RCU_H + +#include +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Michael's ordered list (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_MichaelList_rcu + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Source: + - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" + + This class is non-intrusive version of \ref cds_intrusive_MichaelList_rcu "cds::intrusive::MichaelList" RCU specialization. + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p T - type stored in the list. The type must be default- and copy-constructible. + - \p Traits - type traits, default is michael_list::type_traits + + The implementation does not divide type \p T into key and value part and + may be used as a main building block for hash set containers. 
+ The key is a function (or a part) of type \p T, and this function is specified by Traits::compare functor + or Traits::less predicate. + + \ref cds_nonintrusive_MichaelKVList_rcu "MichaelKVList" is a key-value version of Michael's + non-intrusive list that is closer to the C++ std library approach. + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + + It is possible to declare option-based list with cds::container::michael_list::make_traits metafunction istead of \p Traits template + argument. For example, the following traits-based declaration of Michael's list + + \code + #include + #include + // Declare comparator for the item + struct my_compare { + int operator ()( int i1, int i2 ) + { + return i1 - i2; + } + }; + + // Declare type_traits + struct my_traits: public cds::container::michael_list::type_traits + { + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::container::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, my_traits > traits_based_list; + \endcode + + is equivalent for the following option-based list + \code + #include + #include + + // my_compare is the same + + // Declare option-based list + typedef cds::container::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, + typename cds::container::michael_list::make_traits< + cds::container::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + Template argument list \p Options of cds::container::michael_list::make_traits metafunction are: + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::allocator - the allocator used for creating and freeing list's item. Default is \ref CDS_DEFAULT_ALLOCATOR macro. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - opt::rcu_check_deadlock - a deadlock checking policy. 
Default is opt::v::rcu_throw_deadlock + */ + template < + typename RCU, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = michael_list::type_traits +#else + typename Traits +#endif + > + class MichaelList< cds::urcu::gc, T, Traits > : +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::MichaelList< cds::urcu::gc, T, Traits > +#else + protected details::make_michael_list< cds::urcu::gc, T, Traits >::type +#endif + { + //@cond + typedef details::make_michael_list< cds::urcu::gc, T, Traits > options; + typedef typename options::type base_class; + //@endcond + + public: + typedef T value_type ; ///< Type of value stored in the list + typedef typename base_class::gc gc ; ///< RCU schema used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename options::key_comparator key_comparator ; ///< key comparison functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + typedef typename base_class::rcu_check_deadlock rcu_check_deadlock ; ///< RCU deadlock checking policy + + typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock + static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions require external locking + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename options::cxx_allocator cxx_allocator; + typedef typename options::node_deallocator node_deallocator; + typedef typename options::type_traits::compare intrusive_key_comparator; + + typedef typename base_class::atomic_node_ptr head_type; +# ifndef CDS_CXX11_LAMBDA_SUPPORT + typedef typename base_class::empty_erase_functor empty_erase_functor; +# endif + //@endcond + + public: + typedef cds::urcu::exempt_ptr< gc, node_type, value_type, typename options::type_traits::disposer > exempt_ptr; ///< pointer to extracted node + + private: + //@cond + static value_type& node_to_value( node_type& n ) + { + return n.m_Value; + } + static value_type const& node_to_value( node_type const& n ) + { + return n.m_Value; + } + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + template + struct insert_functor + { + Func m_func; + + insert_functor ( Func f ) + : m_func(f) + {} + + void operator()( node_type& node ) + { + cds::unref(m_func)( node_to_value(node) ); + } + }; + + template + struct ensure_functor + { + Func m_func; + Q const& m_arg; + + ensure_functor( Q const& arg, Func f ) + : m_func(f) + , m_arg( arg ) + {} + + void operator ()( bool bNew, node_type& node, node_type& ) + { + cds::unref(m_func)( bNew, node_to_value(node), m_arg ); + } + }; + + template + struct find_functor + { + Func m_func; + + find_functor( Func f ) + : m_func(f) + {} + + template + void operator ()( node_type& node, Q& val ) + { + cds::unref(m_func)( node_to_value(node), val ); + } + }; + + struct empty_find_functor + { + template + void operator ()( node_type& node, Q& val ) const + {} + }; + + template + struct erase_functor + { + Func m_func; + + erase_functor( Func f ) + : m_func(f) + {} + + void operator()( node_type const& node ) + { + cds::unref(m_func)( node_to_value(node) ); + } + }; +#endif // ifndef CDS_CXX11_LAMBDA_SUPPORT + //@endcond + + protected: + //@cond + template + static node_type * alloc_node( Q const& v ) + { + return 
cxx_allocator().New( v ); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward(args)... ); + } +# endif + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + head_type& head() + { + return base_class::m_pHead; + } + + head_type& head() const + { + return const_cast( base_class::m_pHead ); + } + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( pNode ) + {} + + friend class MichaelList; + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + {} + + iterator_type( iterator_type const& src ) + : iterator_base( src ) + {} + + value_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? &(p->m_Value) : reinterpret_cast(NULL); + } + + value_ref operator *() const + { + return (iterator_base::operator *()).m_Value; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + /// Forward iterator + typedef iterator_type iterator; + + /// Const forward iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( head() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to NULL. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator addressing the first element in a list + //@{ + const_iterator begin() const + { + return const_iterator( head() ); + } + const_iterator cbegin() + { + return const_iterator( head() ); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a list + //@{ + const_iterator end() const + { + return const_iterator(); + } + const_iterator cend() + { + return const_iterator(); + } + //@} + + public: + /// Default constructor + /** + Initialize empty list + */ + MichaelList() + {} + + /// List destructor + /** + Clears the list + */ + ~MichaelList() + { + clear(); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the list. + + The type \p Q should contain as minimum the complete key of the node. + The object of \ref value_type should be constructible from \p val of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + The function makes RCU lock internally. 
+ + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( Q const& val ) + { + return insert_at( head(), val ); + } + + /// Inserts new node + /** + This function inserts new node with default-constructed value and then it calls + \p func functor with signature + \code void func( value_type& itemValue ) ;\endcode + + The argument \p itemValue of user-defined functor \p func is the reference + to the list's item inserted. User-defined functor \p func should guarantee that during changing + item's value no any other changes could be made on this list's item by concurrent threads. + The user-defined functor can be passed by reference using boost::ref + and it is called only if the inserting is success. + + The type \p Q should contain the complete key of the node. + The object of \ref value_type should be constructible from \p key of type \p Q. + + The function allows to split creating of new item into two part: + - create item from \p key with initializing key-fields only; + - insert new item into the list; + - if inserting is successful, initialize non-key fields of item by calling \p f functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + The function makes RCU lock internally. + */ + template + bool insert( Q const& key, Func func ) + { + return insert_at( head(), key, func ); + } + + /// Ensures that the \p key exists in the list + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p key not found in the list, then the new item created from \p key + is inserted into the list. Otherwise, the functor \p func is called with the item found. + The functor \p Func should be a function with signature: + \code + void func( bool bNew, value_type& item, const Q& val ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p key passed into the \p ensure function + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + The function makes RCU lock internally. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair ensure( Q const& key, Func f ) + { + return ensure_at( head(), key, f ); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + The function makes RCU lock internally. + + @note This function is available only for compiler that supports + variadic template and move semantics. + */ + template + bool emplace( Args&&... args ) + { + return emplace_at( head(), std::forward(args)... ); + } +# endif + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_MichealList_rcu_erase_val + Since the key of MichaelList's item type \p T is not explicitly specified, + template parameter \p Q defines the key type searching in the list. 
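A usage sketch for the RCU specialization's insert() and ensure() described above (not part of the patch). It assumes the general_buffered RCU flavour and reuses the hypothetical foo / foo_less types from the HP sketch; the include paths follow the file layout of this patch. These members take the RCU read-side lock internally, so no explicit lock is required around the calls.

\code
#include <cds/urcu/general_buffered.h>
#include <cds/container/michael_list_rcu.h>

typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;

struct foo_rcu_traits : public cds::container::michael_list::type_traits {
    typedef foo_less less;
};
typedef cds::container::MichaelList< rcu_type, foo, foo_rcu_traits > rcu_foo_list;

void rcu_example( rcu_foo_list& lst )
{
    // insert() and ensure() take the RCU read-side lock internally
    lst.insert( foo(7) );

    std::pair<bool, bool> res = lst.ensure( foo(7),
        []( bool bNew, foo& item, foo const& key ) {
            if ( !bNew )
                item.payload = "refreshed";   // update non-key fields only
        });
    // res.first  - the operation succeeded
    // res.second - true if a new item was inserted, false if an existing one was updated
}
\endcode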
+ The list item comparator should be able to compare the type \p T of list item + and the value \p key of type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_at( head(), key, intrusive_key_comparator(), [](value_type const&){} ); +# else + return erase_at( head(), key, intrusive_key_comparator(), empty_erase_functor() ); +# endif + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichealList_rcu_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& key, Less pred ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_at( head(), key, typename options::template less_wrapper::type(), [](value_type const&){} ); +# else + return erase_at( head(), key, typename options::template less_wrapper::type(), empty_erase_functor() ); +# endif + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_MichaelList_rcu_erase_func + The function searches an item with key \p key, calls \p f functor with item found + and deletes it. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(const value_type& val) { ... } + }; + \endcode + The functor may be passed by reference with boost:ref + + Since the key of MichaelList's item type \p T is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { + return erase_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_rcu_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + return erase_at( head(), key, typename options::template less_wrapper::type(), f ); + } + + /// Extracts an item from the list + /** + @anchor cds_nonintrusive_MichaelList_rcu_extract + The function searches an item with key equal to \p val in the list, + unlinks it from the list, and returns pointer to an item found in \p dest argument. + If the item with the key equal to \p val is not found the function returns \p false. + + @note The function does NOT call RCU read-side lock or synchronization, + and does NOT dispose the item found. It just excludes the item from the list + and returns a pointer to item found. + You should lock RCU before calling this function. + + \code + #include + #include + + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::container::MichaelList< rcu, Foo > rcu_michael_list; + + rcu_michael_list theList; + // ... 
+ + rcu_michael_list::exempt_ptr p; + { + // first, we should lock RCU + rcu::scoped_lock sl; + + // Now, you can apply extract function + // Note that you must not delete the item found inside the RCU lock + if ( theList.extract( p, 10 )) { + // do something with p + ... + } + } + // Outside RCU lock section we may safely release extracted pointer. + // release() passes the pointer to RCU reclamation cycle. + p.release(); + \endcode + */ + template + bool extract( exempt_ptr& dest, Q const& val ) + { + dest = extract_at( head(), val, intrusive_key_comparator() ); + return !dest.empty(); + } + + /// Extracts an item from the list using \p pred predicate for searching + /** + This function is the analog for \ref cds_nonintrusive_MichaelList_rcu_extract "extract(exempt_ptr&, Q const&)". + + The \p pred is a predicate used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as \ref key_comparator. + */ + template + bool extract_with( exempt_ptr& dest, Q const& val, Less pred ) + { + dest = extract_at( head(), val, typename options::template less_wrapper::type() ); + return !dest.empty(); + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_MichaelList_rcu_find_val + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function makes RCU lock internally. + */ + template + bool find( Q const& key ) const + { + return find_at( head(), key, intrusive_key_comparator() ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_rcu_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& key, Less pred ) const + { + return find_at( head(), key, typename options::template less_wrapper::type() ); + } + + /// Finds the key \p val and performs an action with it + /** \anchor cds_nonintrusive_MichaelList_rcu_find_func + The function searches an item with key equal to \p val and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the function is only guarantee + that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + The function makes RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) const + { + return find_at( head(), val, intrusive_key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_rcu_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. 
+ \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q& val, Less pred, Func f ) const + { + return find_at( head(), val, typename options::template less_wrapper::type(), f ); + } + + /// Finds the key \p val and performs an action with it + /** \anchor cds_nonintrusive_MichaelList_rcu_find_cfunc + The function searches an item with key equal to \p val and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the function is only guarantee + that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function makes RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) const + { + return find_at( head(), val, intrusive_key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_rcu_find_cfunc "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& val, Less pred, Func f ) const + { + return find_at( head(), val, typename options::template less_wrapper::type(), f ); + } + + /// Finds the key \p val and return the item found + /** \anchor cds_nonintrusive_MichaelList_rcu_get + The function searches the item with key equal to \p val and returns the pointer to item found. + If \p val is not found it returns \p NULL. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::container::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > ord_list; + ord_list theList; + // ... + { + // Lock RCU + ord_list::rcu_lock lock; + + foo * pVal = theList.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + // Unlock RCU by rcu_lock destructor + // pVal can be freed at any time after RCU has been unlocked + } + \endcode + */ + template + value_type * get( Q const& val ) const + { + return get_at( head(), val, intrusive_key_comparator()); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. 
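To make the lifetime rule for get() / get_with() above explicit, here is a short sketch (not part of the patch) reusing the hypothetical rcu_foo_list and the foo_int_less predicate from the earlier sketches: the raw pointer returned is valid only while the RCU read-side lock is held.

\code
void rcu_get_example( rcu_foo_list& lst )
{
    {
        rcu_foo_list::rcu_lock lock;    // lock RCU for the whole access

        foo * p = lst.get_with( 7, foo_int_less() );
        if ( p ) {
            // safe to read *p here; do not store the pointer past the lock scope
        }
    }   // after the rcu_lock destructor, the item may be reclaimed at any time
}
\endcode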
+ */ + template + value_type * get_with( Q const& val, Less pred ) const + { + return get_at( head(), val, typename options::template less_wrapper::type()); + } + + /// Checks if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomics::empty_item_counter, + this function always returns 0. + + Warning: even if you use a real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Clears the list + /** + Post-condition: the list is empty + */ + void clear() + { + base_class::clear(); + } + + protected: + //@cond + bool insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != NULL ); + scoped_node_ptr p(pNode); + if ( base_class::insert_at( refHead, *pNode )) { + p.release(); + return true; + } + + return false; + } + + template + bool insert_at( head_type& refHead, Q const& val ) + { + return insert_node_at( refHead, alloc_node( val )); + } + + template + bool insert_at( head_type& refHead, Q const& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT +# ifdef CDS_BUG_STATIC_MEMBER_IN_LAMBDA + // GCC 4.5,4.6,4.7: node_to_value is unaccessible from lambda, + // like as MichaelList::node_to_value that requires to capture *this* despite on node_to_value is static function + value_type& (* n2v)( node_type& ) = node_to_value; + if ( base_class::insert_at( refHead, *pNode, [&f, n2v]( node_type& node ) { cds::unref(f)( n2v(node) ); } )) +# else + if ( base_class::insert_at( refHead, *pNode, [&f]( node_type& node ) { cds::unref(f)( node_to_value(node) ); } )) +# endif +# else + insert_functor wrapper( f ); + if ( base_class::insert_at( refHead, *pNode, cds::ref(wrapper) )) +# endif + { + pNode.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace_at( head_type& refHead, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(args) ... 
)); + } +# endif + + template + bool erase_at( head_type& refHead, Q const& key, Compare cmp, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT +# ifdef CDS_BUG_STATIC_MEMBER_IN_LAMBDA + // GCC 4.5-4.7: node_to_value is unaccessible from lambda, + // like as MichaelList::node_to_value that requires to capture *this* despite on node_to_value is static function + value_type const& (* n2v)( node_type const& ) = node_to_value; + return base_class::erase_at( refHead, key, cmp, [&f,n2v](node_type const& node){ cds::unref(f)( n2v(node) ); } ); +# else + return base_class::erase_at( refHead, key, cmp, [&f](node_type const& node){ cds::unref(f)( node_to_value(node) ); } ); +# endif +# else + erase_functor wrapper( f ); + return base_class::erase_at( refHead, key, cmp, cds::ref(wrapper) ); +# endif + } + + template + std::pair ensure_at( head_type& refHead, Q const& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT +# ifdef CDS_BUG_STATIC_MEMBER_IN_LAMBDA + // GCC 4.5-4.7: node_to_value is unaccessible from lambda, + // like as MichaelList::node_to_value that requires to capture *this* despite on node_to_value is static function + value_type& (* n2v)( node_type& ) = node_to_value; + std::pair ret = base_class::ensure_at( refHead, *pNode, + [&f, &key, n2v](bool bNew, node_type& node, node_type&){ cds::unref(f)( bNew, n2v(node), key ); }); +# else + std::pair ret = base_class::ensure_at( refHead, *pNode, + [&f, &key](bool bNew, node_type& node, node_type&){ cds::unref(f)( bNew, node_to_value(node), key ); }); +# endif +# else + ensure_functor wrapper( key, f ); + std::pair ret = base_class::ensure_at( refHead, *pNode, cds::ref(wrapper)); +# endif + if ( ret.first && ret.second ) + pNode.release(); + + return ret; + } + + template + node_type * extract_at( head_type& refHead, Q const& key, Compare cmp ) + { + return base_class::extract_at( refHead, key, cmp ); + } + + template + bool find_at( head_type& refHead, Q const& key, Compare cmp ) const + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_at( refHead, key, cmp, [](node_type&, Q const &) {} ); +# else + return base_class::find_at( refHead, key, cmp, empty_find_functor() ); +# endif + } + + template + bool find_at( head_type& refHead, Q& val, Compare cmp, Func f ) const + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT +# ifdef CDS_BUG_STATIC_MEMBER_IN_LAMBDA + // GCC 4.5-4.7: node_to_value is unaccessible from lambda, + // like as MichaelList::node_to_value that requires to capture *this* despite on node_to_value is static function + value_type& (* n2v)( node_type& ) = node_to_value; + return base_class::find_at( refHead, val, cmp, [&f, n2v](node_type& node, Q& v){ cds::unref(f)( n2v(node), v ); }); +# else + return base_class::find_at( refHead, val, cmp, [&f](node_type& node, Q& v){ cds::unref(f)( node_to_value(node), v ); }); +# endif +# else + find_functor wrapper( f ); + return base_class::find_at( refHead, val, cmp, cds::ref(wrapper) ); +# endif + } + + template + value_type * get_at( head_type& refHead, Q const& val, Compare cmp ) const + { + node_type * pNode = base_class::get_at( refHead, val, cmp ); + return pNode ? 
&pNode->m_Value : null_ptr(); + } + + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_MICHAEL_LIST_RCU_H diff --git a/cds/container/michael_map.h b/cds/container/michael_map.h new file mode 100644 index 00000000..a6c5d761 --- /dev/null +++ b/cds/container/michael_map.h @@ -0,0 +1,820 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_MAP_H +#define __CDS_CONTAINER_MICHAEL_MAP_H + +#include +#include + +namespace cds { namespace container { + + /// Michael's hash map + /** @ingroup cds_nonintrusive_map + \anchor cds_nonintrusive_MichaelHashMap_hp + + Source: + - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" + + Michael's hash table algorithm is based on lock-free ordered list and it is very simple. + The main structure is an array \p T of size \p M. Each element in \p T is basically a pointer + to a hash bucket, implemented as a singly linked list. The array of buckets cannot be dynamically expanded. + However, each bucket may contain unbounded number of items. + + Template parameters are: + - \p GC - Garbage collector used. You may use any \ref cds_garbage_collector "Garbage collector" + from the \p libcds library. + Note the \p GC must be the same as the GC used for \p OrderedList + - \p OrderedList - ordered key-value list implementation used as bucket for hash map, for example, MichaelKVList + or LazyKVList. The ordered list implementation specifies the \p Key and \p Value types stored in the hash-map, + the reclamation schema \p GC used by hash-map, the comparison functor for the type \p Key and other features + specific for the ordered list. + - \p Traits - type traits. See michael_map::type_traits for explanation. + + Instead of defining \p Traits struct you may use option-based syntax with \p michael_map::make_traits metafunction + (this metafunction is a synonym for michael_set::make_traits). + For \p michael_map::make_traits the following option may be used: + - opt::hash - mandatory option, specifies hash functor. + - opt::item_counter - optional, specifies item counting policy. See michael_map::type_traits for explanation. + - opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR. + + Many of the class function take a key argument of type \p K that in general is not \ref key_type. + \p key_type and an argument of template type \p K must meet the following requirements: + - \p key_type should be constructible from value of type \p K; + - the hash functor should be able to calculate correct hash value from argument \p key of type \p K: + hash( key_type(key) ) == hash( key ) + - values of type \p key_type and \p K should be comparable + + There are the specializations: + - for \ref cds_urcu_desc "RCU" - declared in cd/container/michael_map_rcu.h, + see \ref cds_nonintrusive_MichaelHashMap_rcu "MichaelHashMap". + - for \ref cds::gc::nogc declared in cds/container/michael_map_nogc.h, + see \ref cds_nonintrusive_MichaelHashMap_nogc "MichaelHashMap". + + Iterators + + The class supports a forward iterator (\ref iterator and \ref const_iterator). + The iteration is unordered. + The iterator object is thread-safe: the element pointed by the iterator object is guarded, + so, the element cannot be reclaimed while the iterator object is alive. + However, passing an iterator object between threads is dangerous. 
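The key_type / K compatibility requirement listed above, hash( key_type(key) ) == hash( key ), usually means providing a hash functor with overloads for every key representation you search by. A hypothetical functor for a std::string key_type queried by C-strings (illustrative only, not part of the patch) could look like the following; it would be supplied through the map traits as shown in the how-to-use example below.

\code
#include <string>

struct string_hash {
    size_t operator()( std::string const& s ) const
    {
        return operator()( s.c_str() );     // same value as for the C-string form
    }
    size_t operator()( char const * s ) const
    {
        // simple FNV-1a style hash over the characters (illustrative only)
        size_t h = 2166136261u;
        for ( ; *s; ++s )
            h = ( h ^ static_cast<unsigned char>( *s )) * 16777619u;
        return h;
    }
};
\endcode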
+ + \warning Due to concurrent nature of Michael's set it is not guarantee that you can iterate + all elements in the set: any concurrent deletion can exclude the element + pointed by the iterator from the set, and your iteration can be terminated + before end of the set. Therefore, such iteration is more suitable for debugging purpose only + + Remember, each iterator object requires an additional hazard pointer, that may be + a limited resource for \p GC like as gc::HP and gc::HRC (for gc::PTB the count of + guards is unlimited). + + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced. + + \anchor cds_nonintrusive_MichaelHashMap_how_touse + How to use + + Suppose, you want to make \p int to \p int map for Hazard Pointer garbage collector. You should + choose suitable ordered list class that will be used as a bucket for the map; it may be MichaelKVList. + \code + #include // MichaelKVList for gc::HP + #include // MIchaelHashMap + + // List traits based on std::less predicate + struct list_traits: public cds::container::michael_list::type_traits + { + typedef std::less less; + }; + + // Ordered list + typedef cds::container::MichaelKVList< cds::gc::HP, int, int, list_traits> int2int_list; + + // Map traits + struct map_traits: public cds::container::michael_map::type_traits + { + struct hash { + size_t operator()( int i ) const + { + return cds::opt::v::hash()( i ); + } + } + }; + + // Your map + typedef cds::container::MichaelHashMap< cds::gc::HP, int2int_list, map_traits > int2int_map; + + // Now you can use int2int_map class + + int main() + { + int2int_map theMap; + + theMap.insert( 100 ); + ... 
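+
+        // A few more illustrative calls (sketch only, not an exhaustive list):
+        theMap.insert( 200, 50 );   // insert key 200 mapped to value 50
+        theMap.find( 200 );         // returns true - the key is present
+        theMap.erase( 100 );        // returns true and removes the item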
+ } + \endcode + + You may use option-based declaration: + \code + #include // MichaelKVList for gc::HP + #include // MIchaelHashMap + + // Ordered list + typedef cds::container::MichaelKVList< cds::gc::HP, int, int, + typename cds::container::michael_list::make_traits< + cds::container::opt::less< std::less > // item comparator option + >::type + > int2int_list; + + // Map + typedef cds::container::MichaelHashMap< cds::gc::HP, int2int_list, + cds::container::michael_map::make_traits< + cc::opt::hash< cds::opt::v::hash > + > + > int2int_map; + \endcode + */ + template < + class GC, + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_map::type_traits +#else + class Traits +#endif + > + class MichaelHashMap + { + public: + typedef OrderedList bucket_type ; ///< type of ordered list used as a bucket implementation + typedef Traits options ; ///< Traits template parameters + + typedef typename bucket_type::key_type key_type ; ///< key type + typedef typename bucket_type::mapped_type mapped_type ; ///< value type + typedef typename bucket_type::value_type value_type ; ///< key/value pair stored in the map + + typedef GC gc ; ///< Garbage collector + typedef typename bucket_type::key_comparator key_comparator ; ///< key compare functor + + /// Hash functor for \ref key_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename options::hash >::type hash; + typedef typename options::item_counter item_counter ; ///< Item counter type + + /// Bucket table allocator + typedef cds::details::Allocator< bucket_type, typename options::allocator > bucket_table_allocator; + typedef typename bucket_type::guarded_ptr guarded_ptr; ///< Guarded pointer + + protected: + item_counter m_ItemCounter ; ///< Item counter + hash m_HashFunctor ; ///< Hash functor + + bucket_type * m_Buckets ; ///< bucket table + + private: + //@cond + const size_t m_nHashBitmask; + //@endcond + + protected: + /// Calculates hash value of \p key + template + size_t hash_value( Q const& key ) const + { + return m_HashFunctor( key ) & m_nHashBitmask; + } + + /// Returns the bucket (ordered list) for \p key + template + bucket_type& bucket( Q const& key ) + { + return m_Buckets[ hash_value( key ) ]; + } + + protected: + //@cond + /// Forward iterator + template + class iterator_type: private cds::intrusive::michael_set::details::iterator< bucket_type, IsConst > + { + typedef cds::intrusive::michael_set::details::iterator< bucket_type, IsConst > base_class; + friend class MichaelHashMap; + + protected: + typedef typename base_class::bucket_ptr bucket_ptr; + typedef typename base_class::list_iterator list_iterator; + + public: + /// Value pointer type (const for const_iterator) + typedef typename cds::details::make_const_type::pointer value_ptr; + /// Value reference type (const for const_iterator) + typedef typename cds::details::make_const_type::reference value_ref; + + /// Key-value pair pointer type (const for const_iterator) + typedef typename cds::details::make_const_type::pointer pair_ptr; + /// Key-value pair reference type (const for const_iterator) + typedef typename cds::details::make_const_type::reference pair_ref; + + protected: + iterator_type( list_iterator const& it, bucket_ptr pFirst, bucket_ptr pLast ) + : base_class( it, pFirst, pLast ) + {} + + public: + /// Default ctor + iterator_type() + : base_class() + {} + + /// Copy ctor + iterator_type( const iterator_type& src ) + : base_class( src ) + {} + + /// Dereference operator + pair_ptr operator ->() const + { 
+ assert( base_class::m_pCurBucket != null_ptr() ); + return base_class::m_itList.operator ->(); + } + + /// Dereference operator + pair_ref operator *() const + { + assert( base_class::m_pCurBucket != null_ptr() ); + return base_class::m_itList.operator *(); + } + + /// Pre-increment + iterator_type& operator ++() + { + base_class::operator++(); + return *this; + } + + /// Assignment operator + iterator_type& operator = (const iterator_type& src) + { + base_class::operator =(src); + return *this; + } + + /// Returns current bucket (debug function) + bucket_ptr bucket() const + { + return base_class::bucket(); + } + + /// Equality operator + template + bool operator ==(iterator_type const& i ) + { + return base_class::operator ==( i ); + } + /// Equality operator + template + bool operator !=(iterator_type const& i ) + { + return !( *this == i ); + } + }; + //@endcond + + public: + /// Forward iterator + typedef iterator_type< false > iterator; + + /// Const forward iterator + typedef iterator_type< true > const_iterator; + + /// Returns a forward iterator addressing the first element in a map + /** + For empty map \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a map + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the map. + For empty map \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() ); + } + + /// Returns a forward const iterator addressing the first element in a map + //@{ + const_iterator begin() const + { + return get_const_begin(); + } + const_iterator cbegin() + { + return get_const_begin(); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a map + //@{ + const_iterator end() const + { + return get_const_end(); + } + const_iterator cend() + { + return get_const_end(); + } + //@} + + private: + //@cond + const_iterator get_const_begin() const + { + return const_iterator( const_cast(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count() ); + } + const_iterator get_const_end() const + { + return const_iterator( const_cast(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() ); + } + //@endcond + + public: + /// Initializes the map + /** + The Michael's hash map is non-expandable container. You should point the average count of items \p nMaxItemCount + when you create an object. + \p nLoadFactor parameter defines average count of items per bucket and it should be small number between 1 and 10. + Remember, since the bucket implementation is an ordered list, searching in the bucket is linear [O(nLoadFactor)]. + Note, that many popular STL hash map implementation uses load factor 1. + + The ctor defines hash table size as rounding nMacItemCount / nLoadFactor up to nearest power of two. 
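+
+            For example (an illustrative sketch; \p int2int_map is the map type from the example above):
+            \code
+            // Expect about 1000 items with ~4 items per bucket:
+            // 1000 / 4 = 250 is rounded up to 256 buckets
+            int2int_map theMap( 1000, 4 );
+            assert( theMap.bucket_count() == 256 );
+            \endcode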
+ */ + MichaelHashMap( + size_t nMaxItemCount, ///< estimation of max item count in the hash map + size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket + ) : m_nHashBitmask( michael_map::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) + { + // GC and OrderedList::gc must be the same + static_assert(( std::is_same::value ), "GC and OrderedList::gc must be the same"); + + // atomicity::empty_item_counter is not allowed as a item counter + static_assert(( !std::is_same::value ), "atomicity::empty_item_counter is not allowed as a item counter"); + + m_Buckets = bucket_table_allocator().NewArray( bucket_count() ); + } + + /// Clears hash map and destroys it + ~MichaelHashMap() + { + clear(); + bucket_table_allocator().Delete( m_Buckets, bucket_count() ); + } + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \p key_type. + - The \p mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( const K& key ) + { + const bool bRet = bucket( key ).insert( key ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from \p key of type \p K. + - The \p mapped_type should be constructible from \p val of type \p V. + + Returns \p true if \p val is inserted into the map, \p false otherwise. + */ + template + bool insert( K const& key, V const& val ) + { + const bool bRet = bucket( key ).insert( key, val ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this map's item by concurrent threads. + The user-defined functor can be passed by reference using boost::ref + and it is called only if inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + */ + template + bool insert_key( const K& key, Func func ) + { + const bool bRet = bucket( key ).insert_key( key, func ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + + /// Ensures that the \p key exists in the map + /** + The operation performs inserting or changing data with lock-free manner. 
+ + If the \p key not found in the map, then the new item created from \p key + is inserted into the map (note that in this case the \p key_type should be + constructible from type \p K). + Otherwise, the functor \p func is called with item found. + The functor \p Func may be a function with signature: + \code + void func( bool bNew, value_type& item ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + + The functor may change any fields of the \p item.second that is \p mapped_type; + however, \p func must guarantee that during changing no any other modifications + could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair ensure( K const& key, Func func ) + { + std::pair bRet = bucket( key ).ensure( key, func ); + if ( bRet.first && bRet.second ) + ++m_ItemCounter; + return bRet; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// For key \p key inserts data of type \p mapped_type constructed with std::forward(args)... + /** + \p key_type should be constructible from type \p K + + Returns \p true if inserting successful, \p false otherwise. + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( K&& key, Args&&... args ) + { + const bool bRet = bucket( key ).emplace( std::forward(key), std::forward(args)... ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } +# endif + + /// Deletes \p key from the map + /** \anchor cds_nonintrusive_MichaelMap_erase_val + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + const bool bRet = bucket( key ).erase( key ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelMap_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred ) + { + const bool bRet = bucket( key ).erase_with( key, pred ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes \p key from the map + /** \anchor cds_nonintrusive_MichaelMap_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& item) { ... } + }; + \endcode + The functor may be passed by reference using boost:ref + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + const bool bRet = bucket( key ).erase( key, f ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelMap_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. 
+ \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + const bool bRet = bucket( key ).erase_with( key, pred, f ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Extracts the item with specified \p key + /** \anchor cds_nonintrusive_MichaelHashMap_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the set, and returns it in \p dest parameter. + If the item with key equal to \p key is not found the function returns \p false. + + Note the compare functor should accept a parameter of type \p K that may be not the same as \p key_type. + + The extracted item is freed automatically when returned \ref guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::MichaelHashMap< your_template_args > michael_map; + michael_map theMap; + // ... + { + michael_map::guarded_ptr gp; + theMap.extract( gp, 5 ); + // Deal with gp + // ... + + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + bool extract( guarded_ptr& dest, K const& key ) + { + const bool bRet = bucket( key ).extract( dest, key ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Extracts the item using compare functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_MichaelHashMap_hp_extract "extract(guarded_ptr&, K const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + bool extract_with( guarded_ptr& dest, K const& key, Less pred ) + { + const bool bRet = bucket( key ).extract_with( dest, key, pred ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_MichaelMap_find_cfunc + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change \p item.second. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the map's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { + return bucket( key ).find( key, f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelMap_find_cfunc "find(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. 
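+
+            For example (a sketch; \p int2int_map and \p theMap are taken from the example above,
+            \p my_updater is a hypothetical user functor):
+            \code
+            struct my_updater {
+                void operator()( int2int_map::value_type& item )
+                {
+                    ++item.second;  // update the mapped value; the key item.first cannot be changed
+                }
+            };
+
+            theMap.find( 42, my_updater() );                        // search with the list's comparator
+            theMap.find_with( 42, std::less<int>(), my_updater() ); // search with an explicit predicate
+            \endcode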
+ */ + template + bool find_with( K const& key, Less pred, Func f ) + { + return bucket( key ).find_with( key, pred, f ); + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_MichaelMap_find_val + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool find( K const& key ) + { + return bucket( key ).find( key ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelMap_find_val "find(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred ) + { + return bucket( key ).find_with( key, pred ); + } + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_MichaelHashMap_hp_get + The function searches the item with key equal to \p key + and assigns the item found to guarded pointer \p ptr. + The function returns \p true if \p key is found, and \p false otherwise. + If \p key is not found the \p ptr parameter is not changed. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::MichaeHashMap< your_template_params > michael_map; + michael_map theMap; + // ... + { + michael_map::guarded_ptr gp; + if ( theMap.get( gp, 5 )) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for \p OrderedList template parameter + should accept a parameter of type \p K that can be not the same as \p key_type. + */ + template + bool get( guarded_ptr& ptr, K const& key ) + { + return bucket( key ).get( ptr, key ); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_MichaelHashMap_hp_get "get( guarded_ptr& ptr, K const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + bool get_with( guarded_ptr& ptr, K const& key, Less pred ) + { + return bucket( key ).get_with( ptr, key, pred ); + } + + /// Clears the map (non-atomic) + /** + The function erases all items from the map. + + The function is not atomic. It cleans up each bucket and then resets the item counter to zero. + If there are a thread that performs insertion while \p clear is working the result is undefined in general case: + empty() may return \p true but the map may contain item(s). + Therefore, \p clear may be used only for debugging purposes. + */ + void clear() + { + for ( size_t i = 0; i < bucket_count(); ++i ) + m_Buckets[i].clear(); + m_ItemCounter.reset(); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + Thus, the correct item counting is an important part of the map implementation. 
+ */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the map + size_t size() const + { + return m_ItemCounter; + } + + /// Returns the size of hash table + /** + Since MichaelHashMap cannot dynamically extend the hash table size, + the value returned is an constant depending on object initialization parameters; + see MichaelHashMap::MichaelHashMap for explanation. + */ + size_t bucket_count() const + { + return m_nHashBitmask + 1; + } + }; +}} // namespace cds::container + +#endif // ifndef __CDS_CONTAINER_MICHAEL_MAP_H diff --git a/cds/container/michael_map_base.h b/cds/container/michael_map_base.h new file mode 100644 index 00000000..976f2def --- /dev/null +++ b/cds/container/michael_map_base.h @@ -0,0 +1,36 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_MAP_BASE_H +#define __CDS_CONTAINER_MICHAEL_MAP_BASE_H + +#include + +namespace cds { namespace container { + + /// MichaelHashMap related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace michael_map { + /// Type traits for MichaelHashMap class + typedef container::michael_set::type_traits type_traits; + + using container::michael_set::make_traits; + + //@cond + namespace details { + using michael_set::details::init_hash_bitmask; + } + //@endcond + + } // namespace michael_map + + //@cond + // Forward declarations + template + class MichaelHashMap; + //@endcond + +}} // namespace cds::container + + +#endif // ifndef __CDS_CONTAINER_MICHAEL_MAP_BASE_H diff --git a/cds/container/michael_map_nogc.h b/cds/container/michael_map_nogc.h new file mode 100644 index 00000000..7cb0f2d5 --- /dev/null +++ b/cds/container/michael_map_nogc.h @@ -0,0 +1,503 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_MAP_NOGC_H +#define __CDS_CONTAINER_MICHAEL_MAP_NOGC_H + +#include +#include +#include + +namespace cds { namespace container { + + /// Michael's hash map (template specialization for gc::nogc) + /** @ingroup cds_nonintrusive_map + \anchor cds_nonintrusive_MichaelHashMap_nogc + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of map item. + + See \ref cds_nonintrusive_MichaelHashMap_hp "MichaelHashMap" for description of template parameters. + + The interface of the specialization is a little different. 
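+
+        For example, \p insert and \p find return iterators rather than \p bool
+        (a sketch; \p list_traits and \p map_traits are assumed to be defined by analogy with
+        the \ref cds_nonintrusive_MichaelHashMap_hp "MichaelHashMap" example):
+        \code
+        typedef cds::container::MichaelKVList< cds::gc::nogc, int, int, list_traits > int2int_list;
+        typedef cds::container::MichaelHashMap< cds::gc::nogc, int2int_list, map_traits > int2int_map;
+
+        int2int_map theMap( 1000, 4 );
+
+        int2int_map::iterator it = theMap.insert( 10, 100 );
+        if ( it != theMap.end() )
+            it->second = 200;   // items are never reclaimed, so the iterator stays valid
+
+        if ( theMap.find( 10 ) == theMap.end() ) {
+            // the key is not in the map
+        }
+        \endcode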
+ */ + template < + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_map::type_traits +#else + class Traits +#endif + > + class MichaelHashMap + { + public: + typedef OrderedList bucket_type ; ///< type of ordered list used as a bucket implementation + typedef Traits options ; ///< Traits template parameters + + typedef typename bucket_type::key_type key_type ; ///< key type + typedef typename bucket_type::mapped_type mapped_type ; ///< type of value stored in the list + typedef typename bucket_type::value_type value_type ; ///< Pair used as the some functor's argument + + typedef gc::nogc gc ; ///< No garbage collector + typedef typename bucket_type::key_comparator key_comparator ; ///< key comparison functor + + /// Hash functor for \ref key_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename options::hash >::type hash; + typedef typename options::item_counter item_counter ; ///< Item counter type + + /// Bucket table allocator + typedef cds::details::Allocator< bucket_type, typename options::allocator > bucket_table_allocator; + + protected: + //@cond + typedef typename bucket_type::iterator bucket_iterator; + typedef typename bucket_type::const_iterator bucket_const_iterator; + //@endcond + + protected: + item_counter m_ItemCounter ; ///< Item counter + hash m_HashFunctor ; ///< Hash functor + + bucket_type * m_Buckets ; ///< bucket table + + private: + //@cond + const size_t m_nHashBitmask; + //@endcond + + protected: + /// Calculates hash value of \p key + size_t hash_value( key_type const & key ) const + { + return m_HashFunctor( key ) & m_nHashBitmask; + } + + /// Returns the bucket (ordered list) for \p key + bucket_type& bucket( key_type const& key ) + { + return m_Buckets[ hash_value( key ) ]; + } + + protected: + protected: + /// Forward iterator + /** + \p IsConst - constness boolean flag + + The forward iterator for Michael's map is based on \p OrderedList forward iterator and has some features: + - it has no post-increment operator, only pre-increment + - it iterates items in unordered fashion + */ + template + class iterator_type: private cds::intrusive::michael_set::details::iterator< bucket_type, IsConst > + { + //@cond + typedef cds::intrusive::michael_set::details::iterator< bucket_type, IsConst > base_class; + friend class MichaelHashMap; + //@endcond + + protected: + //@cond + //typedef typename base_class::bucket_type bucket_type; + typedef typename base_class::bucket_ptr bucket_ptr; + typedef typename base_class::list_iterator list_iterator; + + //typedef typename bucket_type::key_type key_type; + //@endcond + + public: + /// Value pointer type (const for const_iterator) + typedef typename cds::details::make_const_type::pointer value_ptr; + /// Value reference type (const for const_iterator) + typedef typename cds::details::make_const_type::reference value_ref; + + /// Key-value pair pointer type (const for const_iterator) + typedef typename cds::details::make_const_type::pointer pair_ptr; + /// Key-value pair reference type (const for const_iterator) + typedef typename cds::details::make_const_type::reference pair_ref; + + protected: + //@cond + iterator_type( list_iterator const& it, bucket_ptr pFirst, bucket_ptr pLast ) + : base_class( it, pFirst, pLast ) + {} + //@endcond + + public: + /// Default ctor + iterator_type() + : base_class() + {} + + /// Copy ctor + iterator_type( const iterator_type& src ) + : base_class( src ) + {} + + /// Dereference operator + pair_ptr operator ->() const + { + 
assert( base_class::m_pCurBucket != null_ptr() ); + return base_class::m_itList.operator ->(); + } + + /// Dereference operator + pair_ref operator *() const + { + assert( base_class::m_pCurBucket != null_ptr() ); + return base_class::m_itList.operator *(); + } + + /// Pre-increment + iterator_type& operator ++() + { + base_class::operator++(); + return *this; + } + + /// Assignment operator + iterator_type& operator = (const iterator_type& src) + { + base_class::operator =(src); + return *this; + } + + /// Returns current bucket (debug function) + bucket_ptr bucket() const + { + return base_class::bucket(); + } + + /// Equality operator + template + bool operator ==(iterator_type const& i ) + { + return base_class::operator ==( i ); + } + /// Equality operator + template + bool operator !=(iterator_type const& i ) + { + return !( *this == i ); + } + }; + + + public: + /// Forward iterator + typedef iterator_type< false > iterator; + + /// Const forward iterator + typedef iterator_type< true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. + For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() ); + } + + /// Returns a forward const iterator addressing the first element in a set + //@{ + const_iterator begin() const + { + return get_const_begin(); + } + const_iterator cbegin() + { + return get_const_begin(); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a set + //@{ + const_iterator end() const + { + return get_const_end(); + } + const_iterator cend() + { + return get_const_end(); + } + //@} + + private: + //@{ + const_iterator get_const_begin() const + { + return const_iterator( const_cast(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count() ); + } + const_iterator get_const_end() const + { + return const_iterator( const_cast(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() ); + } + //@} + + public: + /// Initialize the map + /** + The Michael's hash map is non-expandable container. You should point the average count of items \p nMaxItemCount + when you create an object. + \p nLoadFactor parameter defines average count of items per bucket and it should be small number between 1 and 10. + Remember, since the bucket implementation is an ordered list, searching in the bucket is linear [O(nLoadFactor)]. + Note, that many popular STL hash map implementation uses load factor 1. + + The ctor defines hash table size as rounding nMacItemCount / nLoadFactor up to nearest power of two. 
+ */ + MichaelHashMap( + size_t nMaxItemCount, ///< estimation of max item count in the hash set + size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket + ) : m_nHashBitmask( michael_map::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) + { + // GC and OrderedList::gc must be the same + static_assert(( std::is_same::value ), "GC and OrderedList::gc must be the same"); + + // atomicity::empty_item_counter is not allowed as a item counter + static_assert(( !std::is_same::value ),"atomicity::empty_item_counter is not allowed as a item counter"); + + m_Buckets = bucket_table_allocator().NewArray( bucket_count() ); + } + + /// Clear hash set and destroy it + ~MichaelHashMap() + { + clear(); + bucket_table_allocator().Delete( m_Buckets, bucket_count() ); + } + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( const K& key ) + { + bucket_type& refBucket = bucket( key ); + bucket_iterator it = refBucket.insert( key ); + + if ( it != refBucket.end() ) { + ++m_ItemCounter; + return iterator( it, &refBucket, m_Buckets + bucket_count() ); + } + + return end(); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( K const& key, V const& val ) + { + bucket_type& refBucket = bucket( key ); + bucket_iterator it = refBucket.insert( key, val ); + + if ( it != refBucket.end() ) { + ++m_ItemCounter; + return iterator( it, &refBucket, m_Buckets + bucket_count() ); + } + + return end(); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted. item.second is a reference to item's value that may be changed. + User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this map's item by concurrent threads. + The user-defined functor can be passed by reference using boost::ref + and it is called only if the inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p f functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. 
+ + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert_key( const K& key, Func func ) + { + bucket_type& refBucket = bucket( key ); + bucket_iterator it = refBucket.insert_key( key, func ); + + if ( it != refBucket.end() ) { + ++m_ItemCounter; + return iterator( it, &refBucket, m_Buckets + bucket_count() ); + } + + return end(); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// For key \p key inserts data of type \ref mapped_type constructed with std::forward(args)... + /** + \p key_type should be constructible from type \p K + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + iterator emplace( K&& key, Args&&... args ) + { + bucket_type& refBucket = bucket( key ); + bucket_iterator it = refBucket.emplace( std::forward(key), std::forward(args)... ); + + if ( it != refBucket.end() ) { + ++m_ItemCounter; + return iterator( it, &refBucket, m_Buckets + bucket_count() ); + } + + return end(); + } +# endif + + /// Ensures that the key \p key exists in the map + /** + The operation inserts new item if the key \p key is not found in the map. + Otherwise, the function returns an iterator that points to item found. + + Returns std::pair where \p first is an iterator pointing to + item found or inserted, \p second is true if new item has been added or \p false if the item + already is in the list. + */ + template + std::pair ensure( const K& key ) + { + bucket_type& refBucket = bucket( key ); + std::pair ret = refBucket.ensure( key ); + + if ( ret.second ) + ++m_ItemCounter; + + return std::make_pair( iterator( ret.first, &refBucket, m_Buckets + bucket_count() ), ret.second ); + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_MichaelMap_nogc_find + + The function searches the item with key equal to \p key + and returns an iterator pointed to item found if the key is found, + and \ref end() otherwise + */ + template + iterator find( const K& key ) + { + bucket_type& refBucket = bucket( key ); + bucket_iterator it = refBucket.find( key ); + + if ( it != refBucket.end() ) + return iterator( it, &refBucket, m_Buckets + bucket_count() ); + + return end(); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelMap_nogc_find "find(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + iterator find_with( const K& key, Less pred ) + { + bucket_type& refBucket = bucket( key ); + bucket_iterator it = refBucket.find_with( key, pred ); + + if ( it != refBucket.end() ) + return iterator( it, &refBucket, m_Buckets + bucket_count() ); + + return end(); + } + + /// Clears the map (non-atomic) + /** + The function deletes all items from the map. + The function is not atomic. It cleans up each bucket and then resets the item counter to zero. + If there are a thread that performs insertion while \p clear is working the result is undefined in general case: + empty() may return \p true but the map may contain item(s). 
+ */ + void clear() + { + for ( size_t i = 0; i < bucket_count(); ++i ) + m_Buckets[i].clear(); + m_ItemCounter.reset(); + } + + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + Thus, the correct item counting feature is an important part of Michael's map implementation. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the map + size_t size() const + { + return m_ItemCounter; + } + + /// Returns the size of hash table + /** + Since MichaelHashMap cannot dynamically extend the hash table size, + the value returned is an constant depending on object initialization parameters; + see MichaelHashMap::MichaelHashMap for explanation. + */ + size_t bucket_count() const + { + return m_nHashBitmask + 1; + } + + }; +}} // namespace cds::container + +#endif // ifndef __CDS_CONTAINER_MICHAEL_MAP_NOGC_H diff --git a/cds/container/michael_map_rcu.h b/cds/container/michael_map_rcu.h new file mode 100644 index 00000000..eaabe397 --- /dev/null +++ b/cds/container/michael_map_rcu.h @@ -0,0 +1,781 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_MAP_RCU_H +#define __CDS_CONTAINER_MICHAEL_MAP_RCU_H + +#include +#include + +namespace cds { namespace container { + + /// Michael's hash map (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_nonintrusive_map + \anchor cds_nonintrusive_MichaelHashMap_rcu + + Source: + - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" + + Michael's hash table algorithm is based on lock-free ordered list and it is very simple. + The main structure is an array \p T of size \p M. Each element in \p T is basically a pointer + to a hash bucket, implemented as a singly linked list. The array of buckets cannot be dynamically expanded. + However, each bucket may contain unbounded number of items. + + Template parameters are: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p OrderedList - ordered key-value list implementation used as bucket for hash map, for example, MichaelKVList. + The ordered list implementation specifies the \p Key and \p Value types stored in the hash-map, the reclamation + schema \p GC used by hash-map, the comparison functor for the type \p Key and other features specific for + the ordered list. + - \p Traits - type traits. See michael_map::type_traits for explanation. + + Instead of defining \p Traits struct you may use option-based syntax with \p michael_map::make_traits metafunction + (this metafunction is a synonym for michael_set::make_traits). + For \p michael_map::make_traits the following option may be used: + - opt::hash - mandatory option, specifies hash functor. + - opt::item_counter - optional, specifies item counting policy. See michael_map::type_traits for explanation. + - opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR. + + Many of the class function take a key argument of type \p K that in general is not \ref key_type. + \p key_type and an argument of template type \p K must meet the following requirements: + - \p key_type should be constructible from value of type \p K; + - the hash functor should be able to calculate correct hash value from argument \p key of type \p K: + hash( key_type(key) ) == hash( key ) + - values of type \p key_type and \p K should be comparable + + How to use + + The tips about how to use Michael's map see \ref cds_nonintrusive_MichaelHashMap_how_touse "MichaelHashMap". 
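+
+        A minimal declaration might look like this sketch (\p map_traits is an assumed traits struct
+        that defines the mandatory \p opt::hash option, as in the \p gc::HP example):
+        \code
+        #include <cds/urcu/general_buffered.h>
+        #include <cds/container/michael_kvlist_rcu.h>
+        #include <cds/container/michael_map_rcu.h>
+
+        typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu;
+        typedef cds::container::MichaelKVList< rcu, int, int > int2int_list;
+        typedef cds::container::MichaelHashMap< rcu, int2int_list, map_traits > int2int_map;
+        \endcode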
+ Remember, that you should include RCU-related header file (for example, cds/urcu/general_buffered.h) + before including cds/container/michael_map_rcu.h. + */ + template < + class RCU, + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_map::type_traits +#else + class Traits +#endif + > + class MichaelHashMap< cds::urcu::gc< RCU >, OrderedList, Traits > + { + public: + typedef OrderedList bucket_type ; ///< type of ordered list used as a bucket implementation + typedef Traits options ; ///< Traits template parameters + + typedef typename bucket_type::key_type key_type ; ///< key type + typedef typename bucket_type::mapped_type mapped_type ; ///< value type + typedef typename bucket_type::value_type value_type ; ///< key/value pair stored in the list + + typedef cds::urcu::gc< RCU > gc ; ///< RCU used as garbage collector + typedef typename bucket_type::key_comparator key_comparator ; ///< key comparison functor + + /// Hash functor for \ref key_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename options::hash >::type hash; + typedef typename options::item_counter item_counter ; ///< Item counter type + + /// Bucket table allocator + typedef cds::details::Allocator< bucket_type, typename options::allocator > bucket_table_allocator; + + typedef typename bucket_type::rcu_lock rcu_lock ; ///< RCU scoped lock + typedef typename bucket_type::exempt_ptr exempt_ptr ; ///< pointer to extracted node + /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that + static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = bucket_type::c_bExtractLockExternal; + + protected: + item_counter m_ItemCounter ; ///< Item counter + hash m_HashFunctor ; ///< Hash functor + + bucket_type * m_Buckets ; ///< bucket table + + private: + //@cond + const size_t m_nHashBitmask; + //@endcond + + protected: + /// Calculates hash value of \p key + template + size_t hash_value( Q const& key ) const + { + return m_HashFunctor( key ) & m_nHashBitmask; + } + + /// Returns the bucket (ordered list) for \p key + //@{ + template + bucket_type& bucket( Q const& key ) + { + return m_Buckets[ hash_value( key ) ]; + } + template + bucket_type const& bucket( Q const& key ) const + { + return m_Buckets[ hash_value( key ) ]; + } + //@} + protected: + /// Forward iterator + /** + \p IsConst - constness boolean flag + + The forward iterator for Michael's map is based on \p OrderedList forward iterator and has the following features: + - it has no post-increment operator, only pre-increment + - it iterates items in unordered fashion + - The iterator cannot be moved across thread boundary since it may contain GC's guard that is thread-private GC data. + - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the map. + + Therefore, the use of iterators in concurrent environment is not good idea. Use the iterator for the concurrent container + for debug purpose only. 
+ */ + template + class iterator_type: private cds::intrusive::michael_set::details::iterator< bucket_type, IsConst > + { + //@cond + typedef cds::intrusive::michael_set::details::iterator< bucket_type, IsConst > base_class; + friend class MichaelHashMap; + //@endcond + + protected: + //@cond + //typedef typename base_class::bucket_type bucket_type; + typedef typename base_class::bucket_ptr bucket_ptr; + typedef typename base_class::list_iterator list_iterator; + + //typedef typename bucket_type::key_type key_type; + //@endcond + + public: + /// Value pointer type (const for const_iterator) + typedef typename cds::details::make_const_type::pointer value_ptr; + /// Value reference type (const for const_iterator) + typedef typename cds::details::make_const_type::reference value_ref; + + /// Key-value pair pointer type (const for const_iterator) + typedef typename cds::details::make_const_type::pointer pair_ptr; + /// Key-value pair reference type (const for const_iterator) + typedef typename cds::details::make_const_type::reference pair_ref; + + protected: + //@cond + iterator_type( list_iterator const& it, bucket_ptr pFirst, bucket_ptr pLast ) + : base_class( it, pFirst, pLast ) + {} + //@endcond + + public: + /// Default ctor + iterator_type() + : base_class() + {} + + /// Copy ctor + iterator_type( const iterator_type& src ) + : base_class( src ) + {} + + /// Dereference operator + pair_ptr operator ->() const + { + assert( base_class::m_pCurBucket != null_ptr() ); + return base_class::m_itList.operator ->(); + } + + /// Dereference operator + pair_ref operator *() const + { + assert( base_class::m_pCurBucket != null_ptr() ); + return base_class::m_itList.operator *(); + } + + /// Pre-increment + iterator_type& operator ++() + { + base_class::operator++(); + return *this; + } + + /// Assignment operator + iterator_type& operator = (const iterator_type& src) + { + base_class::operator =(src); + return *this; + } + + /// Returns current bucket (debug function) + bucket_ptr bucket() const + { + return base_class::bucket(); + } + + /// Equality operator + template + bool operator ==(iterator_type const& i ) + { + return base_class::operator ==( i ); + } + /// Equality operator + template + bool operator !=(iterator_type const& i ) + { + return !( *this == i ); + } + }; + + public: + /// Forward iterator + typedef iterator_type< false > iterator; + + /// Const forward iterator + typedef iterator_type< true > const_iterator; + + /// Returns a forward iterator addressing the first element in a map + /** + For empty map \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a map + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the map. 
+ For empty map \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() ); + } + + /// Returns a forward const iterator addressing the first element in a map + //@{ + const_iterator begin() const + { + return get_const_begin(); + } + const_iterator cbegin() + { + return get_const_begin(); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a map + //@{ + const_iterator end() const + { + return get_const_end(); + } + const_iterator cend() + { + return get_const_end(); + } + //@} + + private: + //@cond + const_iterator get_const_begin() const + { + return const_iterator( const_cast(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count() ); + } + const_iterator get_const_end() const + { + return const_iterator( const_cast(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() ); + } + //@endcond + + public: + /// Initializes the map + /** + The Michael's hash map is non-expandable container. You should point the average count of items \p nMaxItemCount + when you create an object. + \p nLoadFactor parameter defines average count of items per bucket and it should be small number between 1 and 10. + Remember, since the bucket implementation is an ordered list, searching in the bucket is linear [O(nLoadFactor)]. + Note, that many popular STL hash map implementation uses load factor 1. + + The ctor defines hash table size as rounding nMacItemCount / nLoadFactor up to nearest power of two. + */ + MichaelHashMap( + size_t nMaxItemCount, ///< estimation of max item count in the hash map + size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket + ) : m_nHashBitmask( michael_map::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) + { + // GC and OrderedList::gc must be the same + static_assert(( std::is_same::value ), "GC and OrderedList::gc must be the same"); + + // atomicity::empty_item_counter is not allowed as a item counter + static_assert(( !std::is_same::value ), "atomicity::empty_item_counter is not allowed as a item counter"); + + m_Buckets = bucket_table_allocator().NewArray( bucket_count() ); + } + + /// Clears hash map and destroys it + ~MichaelHashMap() + { + clear(); + bucket_table_allocator().Delete( m_Buckets, bucket_count() ); + } + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + The function applies RCU lock internally. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( const K& key ) + { + const bool bRet = bucket( key ).insert( key ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. + + The function applies RCU lock internally. + + Returns \p true if \p val is inserted into the map, \p false otherwise. 
+ */ + template + bool insert( K const& key, V const& val ) + { + const bool bRet = bucket( key ).insert( key, val ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this map's item by concurrent threads. + The user-defined functor can be passed by reference using boost::ref + and it is called only if inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + The function applies RCU lock internally. + */ + template + bool insert_key( const K& key, Func func ) + { + const bool bRet = bucket( key ).insert_key( key, func ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + + /// Ensures that the \p key exists in the map + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p key not found in the map, then the new item created from \p key + is inserted into the map (note that in this case the \ref key_type should be + constructible from type \p K). + Otherwise, the functor \p func is called with item found. + The functor \p Func may be a function with signature: + \code + void func( bool bNew, value_type& item ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + + The functor may change any fields of the \p item.second that is \ref mapped_type; + however, \p func must guarantee that during changing no any other modifications + could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + The function applies RCU lock internally. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair ensure( K const& key, Func func ) + { + std::pair bRet = bucket( key ).ensure( key, func ); + if ( bRet.first && bRet.second ) + ++m_ItemCounter; + return bRet; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// For key \p key inserts data of type \ref mapped_type constructed with std::forward(args)... + /** + \p key_type should be constructible from type \p K + + Returns \p true if inserting successful, \p false otherwise. + + The function applies RCU lock internally. 
+ + @note This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( K&& key, Args&&... args ) + { + const bool bRet = bucket( key ).emplace( std::forward(key), std::forward(args)... ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } +# endif + + /// Deletes \p key from the map + /** \anchor cds_nonintrusive_MichaelMap_rcu_erase_val + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( const K& key ) + { + const bool bRet = bucket( key ).erase( key ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelMap_rcu_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less predicate has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( const K& key, Less pred ) + { + const bool bRet = bucket( key ).erase_with( key, pred ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes \p key from the map + /** \anchor cds_nonintrusive_MichaelMap_rcu_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& item) { ... } + }; + \endcode + The functor may be passed by reference using boost:ref + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( const K& key, Func f ) + { + const bool bRet = bucket( key ).erase( key, f ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelMap_rcu_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( const K& key, Less pred, Func f ) + { + const bool bRet = bucket( key ).erase_with( key, pred, f ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Extracts an item from the map + /** \anchor cds_nonintrusive_MichaelHashMap_rcu_extract + The function searches an item with key equal to \p key, + unlinks it from the map, places item pointer into \p dest argument, and returns \p true. + If the item is not found the function return \p false. + + @note The function does NOT call RCU read-side lock or synchronization, + and does NOT dispose the item found. It just excludes the item from the map + and returns a pointer to item found. + You should lock RCU before calling of the function, and you should synchronize RCU + outside the RCU lock to free extracted item + + \code + #include + #include + #include + + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::container::MichaelKVList< rcu, int, Foo > rcu_michael_list; + typedef cds::container::MichaelHashMap< rcu, rcu_michael_list, foo_traits > rcu_michael_map; + + rcu_michael_map theMap; + // ... 
+ + rcu_michael_map::exempt_ptr p; + { + // first, we should lock RCU + rcu_michael_map::rcu_lock lock; + + // Now, you can apply extract function + // Note that you must not delete the item found inside the RCU lock + if ( theMap.extract( p, 10 )) { + // do something with p + ... + } + } + + // We may safely release p here + // release() passes the pointer to RCU reclamation cycle + p.release(); + \endcode + */ + template + bool extract( exempt_ptr& dest, K const& key ) + { + if ( bucket( key ).extract( dest, key )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Extracts an item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelHashMap_rcu_extract "extract(exempt_ptr&, K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + bool extract_with( exempt_ptr& dest, K const& key, Less pred ) + { + if ( bucket( key ).extract_with( dest, key, pred )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_MichaelMap_rcu_find_cfunc + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change \p item.second. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the map's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The function applies RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) const + { + return bucket( key ).find( key, f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelMap_rcu_find_cfunc "find(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred, Func f ) const + { + return bucket( key ).find_with( key, pred, f ); + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_MichaelMap_rcu_find_val + + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool find( K const& key ) const + { + return bucket( key ).find( key ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelMap_rcu_find_val "find(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. 
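+
+            A minimal sketch (assuming the map key type is \p int; \p less_by_key is an illustrative
+            predicate, not part of this header):
+            \code
+            struct less_by_key {
+                bool operator()( int lhs, int rhs ) const { return lhs < rhs; }
+            };
+            // ...
+            bool bFound = theMap.find_with( 10, less_by_key() );
+            \endcode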
+        */
+        template <typename K, typename Less>
+        bool find_with( K const& key, Less pred ) const
+        {
+            return bucket( key ).find_with( key, pred );
+        }
+
+        /// Finds \p key and returns the item found
+        /** \anchor cds_nonintrusive_MichaelHashMap_rcu_get
+            The function searches for the item with key equal to \p key and returns a pointer to the item found.
+            If \p key is not found it returns \p NULL.
+
+            Note the compare functor should accept a parameter of type \p K that need not be the same as \p key_type.
+
+            RCU should be locked before calling this function.
+            The returned item is valid only while RCU is locked:
+            \code
+            typedef cds::container::MichaelHashMap< your_template_parameters > hash_map;
+            hash_map theMap;
+            // ...
+            {
+                // Lock RCU
+                hash_map::rcu_lock lock;
+
+                hash_map::value_type * pVal = theMap.get( 5 );
+                if ( pVal ) {
+                    // Deal with pVal
+                    //...
+                }
+                // Unlock RCU by rcu_lock destructor
+                // pVal can be freed at any time after RCU has been unlocked
+            }
+            \endcode
+        */
+        template <typename K>
+        value_type * get( K const& key ) const
+        {
+            return bucket( key ).get( key );
+        }
+
+        /// Finds \p key and returns the item found
+        /**
+            The function is an analog of \ref cds_nonintrusive_MichaelHashMap_rcu_get "get(K const&)"
+            but \p pred is used for comparing the keys.
+
+            \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K
+            in any order.
+            \p pred must imply the same element order as the comparator used for building the map.
+        */
+        template <typename K, typename Less>
+        value_type * get_with( K const& key, Less pred ) const
+        {
+            return bucket( key ).get_with( key, pred );
+        }
+
+        /// Clears the map (non-atomic)
+        /**
+            The function erases all items from the map.
+
+            The function is not atomic. It cleans up each bucket and then resets the item counter to zero.
+            If a thread performs insertion while \p clear is running, the result is undefined in the general case:
+            empty() may return \p true but the map may contain item(s).
+            Therefore, \p clear may be used only for debugging purposes.
+
+            RCU \p synchronize method can be called. RCU should not be locked.
+        */
+        void clear()
+        {
+            for ( size_t i = 0; i < bucket_count(); ++i )
+                m_Buckets[i].clear();
+            m_ItemCounter.reset();
+        }
+
+        /// Checks if the map is empty
+        /**
+            Emptiness is checked by item counting: if the item count is zero then the map is empty.
+            Thus, correct item counting is an important part of the map implementation.
+        */
+        bool empty() const
+        {
+            return size() == 0;
+        }
+
+        /// Returns item count in the map
+        size_t size() const
+        {
+            return m_ItemCounter;
+        }
+
+        /// Returns the size of hash table
+        /**
+            Since MichaelHashMap cannot dynamically extend the hash table size,
+            the value returned is a constant depending on object initialization parameters;
+            see MichaelHashMap::MichaelHashMap for explanation.
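+
+            For example, assuming the constructor rounds \p nMaxItemCount / \p nLoadFactor up to the nearest
+            power of two (as described for the constructor), a map created with \p nMaxItemCount = 100 and
+            \p nLoadFactor = 4 rounds 100 / 4 = 25 up to 32, so \p bucket_count() returns 32.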
+ */ + size_t bucket_count() const + { + return m_nHashBitmask + 1; + } + }; +}} // namespace cds::container + +#endif // ifndef __CDS_CONTAINER_MICHAEL_MAP_RCU_H diff --git a/cds/container/michael_set.h b/cds/container/michael_set.h new file mode 100644 index 00000000..9bdcfaef --- /dev/null +++ b/cds/container/michael_set.h @@ -0,0 +1,772 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_SET_H +#define __CDS_CONTAINER_MICHAEL_SET_H + +#include +#include + +namespace cds { namespace container { + + /// Michael's hash set + /** @ingroup cds_nonintrusive_set + \anchor cds_nonintrusive_MichaelHashSet_hp + + Source: + - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" + + Michael's hash table algorithm is based on lock-free ordered list and it is very simple. + The main structure is an array \p T of size \p M. Each element in \p T is basically a pointer + to a hash bucket, implemented as a singly linked list. The array of buckets cannot be dynamically expanded. + However, each bucket may contain unbounded number of items. + + Template parameters are: + - \p GC - Garbage collector used. You may use any \ref cds_garbage_collector "Garbage collector" + from the \p libcds library. + Note the \p GC must be the same as the GC used for \p OrderedList + - \p OrderedList - ordered list implementation used as bucket for hash set, for example, MichaelList. + The ordered list implementation specifies the type \p T stored in the hash-set, the reclamation + schema \p GC used by hash-set, the comparison functor for the type \p T and other features specific for + the ordered list. + - \p Traits - type traits. See michael_set::type_traits for explanation. + + Instead of defining \p Traits struct you may use option-based syntax with michael_set::make_traits metafunction. + For michael_set::make_traits the following option may be used: + - opt::hash - mandatory option, specifies hash functor. + - opt::item_counter - optional, specifies item counting policy. See michael_set::type_traits for explanation. + - opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR. + + There are the specializations: + - for \ref cds_urcu_desc "RCU" - declared in cd/container/michael_set_rcu.h, + see \ref cds_nonintrusive_MichaelHashSet_rcu "MichaelHashSet". + - for \ref cds::gc::nogc declared in cds/container/michael_set_nogc.h, + see \ref cds_nonintrusive_MichaelHashSet_nogc "MichaelHashSet". + + \anchor cds_nonintrusive_MichaelHashSet_hash_functor + Hash functor + + Some member functions of Michael's hash set accept the key parameter of type \p Q which differs from node type \p value_type. + It is expected that type \p Q contains full key of node type \p value_type, and if keys of type \p Q and \p value_type + are equal the hash values of these keys must be equal too. + + The hash functor Traits::hash should accept parameters of both type: + \code + // Our node type + struct Foo { + std::string key_ ; // key field + // ... other fields + }; + + // Hash functor + struct fooHash { + size_t operator()( const std::string& s ) const + { + return std::hash( s ); + } + + size_t operator()( const Foo& f ) const + { + return (*this)( f.key_ ); + } + }; + \endcode + + Iterators + + The class supports a forward iterator (\ref iterator and \ref const_iterator). + The iteration is unordered. + The iterator object is thread-safe: the element pointed by the iterator object is guarded, + so, the element cannot be reclaimed while the iterator object is alive. 
+ However, passing an iterator object between threads is dangerous. + + \warning Due to concurrent nature of Michael's set it is not guarantee that you can iterate + all elements in the set: any concurrent deletion can exclude the element + pointed by the iterator from the set, and your iteration can be terminated + before end of the set. Therefore, such iteration is more suitable for debugging purpose only + + Remember, each iterator object requires an additional hazard pointer, that may be + a limited resource for \p GC like as gc::HP and gc::HRC (for gc::PTB the count of + guards is unlimited). + + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced. + + How to use + + Suppose, we have the following type \p Foo that we want to store in our MichaelHashSet: + \code + struct Foo { + int nKey ; // key field + int nVal ; // value field + }; + \endcode + + To use \p %MichaelHashSet for \p Foo values, you should first choose suitable ordered list class + that will be used as a bucket for the set. We will use gc::PTB reclamation schema and + MichaelList as a bucket type. Also, for ordered list we should develop a comparator for our \p Foo + struct. + \code + #include + #include + + namespace cc = cds::container; + + // Foo comparator + struct Foo_cmp { + int operator ()(Foo const& v1, Foo const& v2 ) const + { + if ( std::less( v1.nKey, v2.nKey )) + return -1; + return std::less(v2.nKey, v1.nKey) ? 1 : 0; + } + }; + + // Our ordered list + typedef cc::MichaelList< cds::gc::PTB, Foo, + typename cc::michael_list::make_traits< + cc::opt::compare< Foo_cmp > // item comparator option + >::type + > bucket_list; + + // Hash functor for Foo + struct foo_hash { + size_t operator ()( int i ) const + { + return std::hash( i ); + } + size_t operator()( Foo const& i ) const + { + return std::hash( i.nKey ); + } + }; + + // Declare set type. + // Note that \p GC template parameter of ordered list must be equal \p GC for the set. 
+ typedef cc::MichaelHashSet< cds::gc::PTB, bucket_list, + cc::michael_set::make_traits< + cc::opt::hash< foo_hash > + >::type + > foo_set; + + // Set variable + foo_set fooSet; + \endcode + */ + template < + class GC, + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_set::type_traits +#else + class Traits +#endif + > + class MichaelHashSet + { + public: + typedef OrderedList bucket_type ; ///< type of ordered list used as a bucket implementation + typedef Traits options ; ///< Traits template parameters + + typedef typename bucket_type::value_type value_type ; ///< type of value stored in the list + typedef GC gc ; ///< Garbage collector + typedef typename bucket_type::key_comparator key_comparator ; ///< key comparison functor + + /// Hash functor for \ref value_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename options::hash >::type hash; + typedef typename options::item_counter item_counter ; ///< Item counter type + + /// Bucket table allocator + typedef cds::details::Allocator< bucket_type, typename options::allocator > bucket_table_allocator; + + typedef typename bucket_type::guarded_ptr guarded_ptr; ///< Guarded pointer + + protected: + item_counter m_ItemCounter ; ///< Item counter + hash m_HashFunctor ; ///< Hash functor + + bucket_type * m_Buckets ; ///< bucket table + + private: + //@cond + const size_t m_nHashBitmask; + //@endcond + + protected: + /// Calculates hash value of \p key + template + size_t hash_value( Q const& key ) const + { + return m_HashFunctor( key ) & m_nHashBitmask; + } + + /// Returns the bucket (ordered list) for \p key + template + bucket_type& bucket( Q const& key ) + { + return m_Buckets[ hash_value( key ) ]; + } + + public: + /// Forward iterator + typedef michael_set::details::iterator< bucket_type, false > iterator; + + /// Const forward iterator + typedef michael_set::details::iterator< bucket_type, true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. 
+ For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() ); + } + + /// Returns a forward const iterator addressing the first element in a set + //@{ + const_iterator begin() const + { + return get_const_begin(); + } + const_iterator cbegin() + { + return get_const_begin(); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a set + //@{ + const_iterator end() const + { + return get_const_end(); + } + const_iterator cend() + { + return get_const_end(); + } + //@} + + private: + //@cond + const_iterator get_const_begin() const + { + return const_iterator( const_cast(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count() ); + } + const_iterator get_const_end() const + { + return const_iterator( const_cast(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() ); + } + //@endcond + + public: + /// Initialize hash set + /** + The Michael's hash set is non-expandable container. You should point the average count of items \p nMaxItemCount + when you create an object. + \p nLoadFactor parameter defines average count of items per bucket and it should be small number between 1 and 10. + Remember, since the bucket implementation is an ordered list, searching in the bucket is linear [O(nLoadFactor)]. + Note, that many popular STL hash map implementation uses load factor 1. + + The ctor defines hash table size as rounding nMacItemCount / nLoadFactor up to nearest power of two. + */ + MichaelHashSet( + size_t nMaxItemCount, ///< estimation of max item count in the hash set + size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket + ) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) + { + // GC and OrderedList::gc must be the same + static_assert(( std::is_same::value ), "GC and OrderedList::gc must be the same"); + + // atomicity::empty_item_counter is not allowed as a item counter + static_assert(( !std::is_same::value ), "atomicity::empty_item_counter is not allowed as a item counter"); + + m_Buckets = bucket_table_allocator().NewArray( bucket_count() ); + } + + /// Clear hash set and destroy it + ~MichaelHashSet() + { + clear(); + bucket_table_allocator().Delete( m_Buckets, bucket_count() ); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain as minimum the complete key for the node. + The object of \ref value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( Q const& val ) + { + const bool bRet = bucket( val ).insert( val ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-fields of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. 
+ The user-defined functor is called only if the inserting is success. It may be passed by reference + using boost::ref + */ + template + bool insert( Q const& val, Func f ) + { + const bool bRet = bucket( val ).insert( val, f ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Ensures that the item exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p val key not found in the set, then the new item created from \p val + is inserted into the set. Otherwise, the functor \p func is called with the item found. + The functor \p Func should be a function with signature: + \code + void func( bool bNew, value_type& item, const Q& val ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p key passed into the \p ensure function + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the set. + */ + template + std::pair ensure( const Q& val, Func func ) + { + std::pair bRet = bucket( val ).ensure( val, func ); + if ( bRet.first && bRet.second ) + ++m_ItemCounter; + return bRet; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + @note This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + bool bRet = bucket( value_type(std::forward(args)...) ).emplace( std::forward(args)... ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } +# endif + + /// Deletes \p key from the set + /** \anchor cds_nonintrusive_MichaelSet_erase_val + + Since the key of MichaelHashSet's item type \ref value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The set item comparator should be able to compare the type \p value_type + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + const bool bRet = bucket( key ).erase( key ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelSet_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred ) + { + const bool bRet = bucket( key ).erase_with( key, pred ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes \p key from the set + /** \anchor cds_nonintrusive_MichaelSet_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. 
+ + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type const& val); + }; + \endcode + The functor may be passed by reference using boost:ref + + Since the key of %MichaelHashSet's \p value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { + const bool bRet = bucket( key ).erase( key, f ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelSet_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + const bool bRet = bucket( key ).erase_with( key, pred, f ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Extracts the item with specified \p key + /** \anchor cds_nonintrusive_MichaelHashSet_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the set, and returns it in \p dest parameter. + If the item with key equal to \p key is not found the function returns \p false. + + Note the compare functor should accept a parameter of type \p Q that may be not the same as \p value_type. + + The extracted item is freed automatically when returned \ref guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::MichaelHashSet< your_template_args > michael_set; + michael_set theSet; + // ... + { + michael_set::guarded_ptr gp; + theSet.extract( gp, 5 ); + // Deal with gp + // ... + + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + bool extract( guarded_ptr& dest, Q const& key ) + { + const bool bRet = bucket( key ).extract( dest, key ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Extracts the item using compare functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_MichaelHashSet_hp_extract "extract(guarded_ptr&, Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool extract_with( guarded_ptr& dest, Q const& key, Less pred ) + { + const bool bRet = bucket( key ).extract_with( dest, key, pred ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Finds the key \p val + /** \anchor cds_nonintrusive_MichaelSet_find_func + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. 
Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return bucket( val ).find( val, f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelSet_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Less pred, Func f ) + { + return bucket( val ).find_with( val, pred, f ); + } + + /// Finds the key \p val + /** \anchor cds_nonintrusive_MichaelSet_find_cfunc + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return bucket( val ).find( val, f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelSet_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred, Func f ) + { + return bucket( val ).find_with( val, pred, f ); + } + + /// Finds the key \p val + /** \anchor cds_nonintrusive_MichaelSet_find_val + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \ref value_type. + */ + template + bool find( Q const& val ) + { + return bucket( val ).find( val ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelSet_find_val "find(Q const&)" + but \p pred is used for key comparing. 
+ \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred ) + { + return bucket( val ).find_with( val, pred ); + } + + /// Finds the key \p val and return the item found + /** \anchor cds_nonintrusive_MichaelHashSet_hp_get + The function searches the item with key equal to \p val + and assigns the item found to guarded pointer \p ptr. + The function returns \p true if \p val is found, and \p false otherwise. + If \p val is not found the \p ptr parameter is not changed. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::MichaeHashSet< your_template_params > michael_set; + michael_set theSet; + // ... + { + michael_set::guarded_ptr gp; + if ( theSet.get( gp, 5 )) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for \p OrderedList template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool get( guarded_ptr& ptr, Q const& val ) + { + return bucket( val ).get( ptr, val ); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_nonintrusive_MichaelHashSet_hp_get "get( guarded_ptr& ptr, Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool get_with( guarded_ptr& ptr, Q const& val, Less pred ) + { + return bucket( val ).get_with( ptr, val, pred ); + } + + /// Clears the set (non-atomic) + /** + The function erases all items from the set. + + The function is not atomic. It cleans up each bucket and then resets the item counter to zero. + If there are a thread that performs insertion while \p clear is working the result is undefined in general case: + empty() may return \p true but the set may contain item(s). + Therefore, \p clear may be used only for debugging purposes. + */ + void clear() + { + for ( size_t i = 0; i < bucket_count(); ++i ) + m_Buckets[i].clear(); + m_ItemCounter.reset(); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, the correct item counting feature is an important part of Michael's set implementation. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + size_t size() const + { + return m_ItemCounter; + } + + /// Returns the size of hash table + /** + Since MichaelHashSet cannot dynamically extend the hash table size, + the value returned is an constant depending on object initialization parameters; + see MichaelHashSet::MichaelHashSet for explanation. 
+ */ + size_t bucket_count() const + { + return m_nHashBitmask + 1; + } + }; + +}} // namespace cds::container + +#endif // ifndef __CDS_CONTAINER_MICHAEL_SET_H diff --git a/cds/container/michael_set_base.h b/cds/container/michael_set_base.h new file mode 100644 index 00000000..41ea1473 --- /dev/null +++ b/cds/container/michael_set_base.h @@ -0,0 +1,44 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_SET_BASE_H +#define __CDS_CONTAINER_MICHAEL_SET_BASE_H + +#include + +namespace cds { namespace container { + + /// MichaelHashSet related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace michael_set { + + /// Type traits for MichaelHashSet class (typedef for cds::intrusive::michael_set::type_traits) + typedef intrusive::michael_set::type_traits type_traits; + + /// Metafunction converting option list to traits struct + /** + This is a synonym for intrusive::michael_set::make_traits + */ + template + struct make_traits { + typedef typename intrusive::michael_set::make_traits::type type ; ///< Result of metafunction + }; + + //@cond + namespace details { + using intrusive::michael_set::details::init_hash_bitmask; + using intrusive::michael_set::details::list_iterator_selector; + using intrusive::michael_set::details::iterator; + } + //@endcond + } + + //@cond + // Forward declarations + template + class MichaelHashSet; + //@endcond + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_MICHAEL_SET_BASE_H diff --git a/cds/container/michael_set_nogc.h b/cds/container/michael_set_nogc.h new file mode 100644 index 00000000..46b6058e --- /dev/null +++ b/cds/container/michael_set_nogc.h @@ -0,0 +1,329 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_SET_NOGC_H +#define __CDS_CONTAINER_MICHAEL_SET_NOGC_H + +#include +#include +#include + +namespace cds { namespace container { + + /// Michael's hash set (template specialization for gc::nogc) + /** @ingroup cds_nonintrusive_set + \anchor cds_nonintrusive_MichaelHashSet_nogc + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of list item. + + See \ref cds_nonintrusive_MichaelHashSet_hp "MichaelHashSet" for description of template parameters. + The template parameter \p OrderedList should be any gc::nogc-derived ordered list, for example, + \ref cds_nonintrusive_MichaelList_nogc "persistent MichaelList". + + The interface of the specialization is a slightly different. 
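+
+        A minimal declaration sketch (assuming \p Foo, \p Foo_cmp and \p foo_hash are defined as in the
+        \ref cds_nonintrusive_MichaelHashSet_hp "MichaelHashSet" example and that \p bucket_list is a
+        gc::nogc-based MichaelList over \p Foo; all names are illustrative):
+        \code
+        typedef cc::MichaelHashSet< cds::gc::nogc, bucket_list,
+            cc::michael_set::make_traits<
+                cc::opt::hash< foo_hash >
+            >::type
+        > nogc_foo_set;
+
+        nogc_foo_set theSet( 100, 2 );
+        Foo v;
+        v.nKey = 5;
+        v.nVal = 50;
+        // insert() of this specialization returns an iterator rather than bool
+        nogc_foo_set::iterator it = theSet.insert( v );
+        if ( it != theSet.end() ) {
+            // the item has been inserted
+        }
+        \endcode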
+ */ + template < + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_set::type_traits +#else + class Traits +#endif + > + class MichaelHashSet< gc::nogc, OrderedList, Traits > + { + public: + typedef OrderedList bucket_type ; ///< type of ordered list used as a bucket implementation + typedef Traits options ; ///< Traits template parameters + + typedef typename bucket_type::value_type value_type ; ///< type of value stored in the list + typedef gc::nogc gc ; ///< Garbage collector + typedef typename bucket_type::key_comparator key_comparator ; ///< key comparison functor + + /// Hash functor for \ref value_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename options::hash >::type hash; + typedef typename options::item_counter item_counter ; ///< Item counter type + + /// Bucket table allocator + typedef cds::details::Allocator< bucket_type, typename options::allocator > bucket_table_allocator; + + protected: + //@cond + typedef typename bucket_type::iterator bucket_iterator; + typedef typename bucket_type::const_iterator bucket_const_iterator; + //@endcond + + protected: + item_counter m_ItemCounter ; ///< Item counter + hash m_HashFunctor ; ///< Hash functor + + bucket_type * m_Buckets ; ///< bucket table + + private: + //@cond + const size_t m_nHashBitmask; + //@endcond + + protected: + /// Calculates hash value of \p key + template + size_t hash_value( const Q& key ) const + { + return m_HashFunctor( key ) & m_nHashBitmask; + } + + /// Returns the bucket (ordered list) for \p key + template + bucket_type& bucket( const Q& key ) + { + return m_Buckets[ hash_value( key ) ]; + } + + public: + /// Forward iterator + /** + The forward iterator for Michael's set is based on \p OrderedList forward iterator and has some features: + - it has no post-increment operator + - it iterates items in unordered fashion + */ + typedef michael_set::details::iterator< bucket_type, false > iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef michael_set::details::iterator< bucket_type, true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. 
+ For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() ); + } + + /// Returns a forward const iterator addressing the first element in a set + //@{ + const_iterator begin() const + { + return get_const_begin(); + } + const_iterator cbegin() + { + return get_const_begin(); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a set + //@{ + const_iterator end() const + { + return get_const_end(); + } + const_iterator cend() + { + return get_const_end(); + } + //@} + + private: + //@{ + const_iterator get_const_begin() const + { + return const_iterator( const_cast(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count() ); + } + const_iterator get_const_end() const + { + return const_iterator( const_cast(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() ); + } + //@} + + public: + /// Initialize hash set + /** + See \ref cds_nonintrusive_MichaelHashSet_hp "MichaelHashSet" ctor for explanation + */ + MichaelHashSet( + size_t nMaxItemCount, ///< estimation of max item count in the hash set + size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket + ) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) + { + // GC and OrderedList::gc must be the same + static_assert(( std::is_same::value ), "GC and OrderedList::gc must be the same"); + + // atomicity::empty_item_counter is not allowed as a item counter + static_assert(( !std::is_same::value ), "atomicity::empty_item_counter is not allowed as a item counter"); + + m_Buckets = bucket_table_allocator().NewArray( bucket_count() ); + } + + /// Clear hash set and destroy it + ~MichaelHashSet() + { + clear(); + bucket_table_allocator().Delete( m_Buckets, bucket_count() ); + } + + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Return an iterator pointing to inserted item if success, otherwise \ref end() + */ + template + iterator insert( const Q& val ) + { + bucket_type& refBucket = bucket( val ); + bucket_iterator it = refBucket.insert( val ); + + if ( it != refBucket.end() ) { + ++m_ItemCounter; + return iterator( it, &refBucket, m_Buckets + bucket_count() ); + } + + return end(); + } + +#ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Return an iterator pointing to inserted item if success \ref end() otherwise + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + iterator emplace( Args&&... args ) + { + bucket_type& refBucket = bucket( value_type(std::forward(args)...)); + bucket_iterator it = refBucket.emplace( std::forward(args)... ); + + if ( it != refBucket.end() ) { + ++m_ItemCounter; + return iterator( it, &refBucket, m_Buckets + bucket_count() ); + } + + return end(); + } +#endif + + /// Ensures that the item \p val exists in the set + /** + The operation inserts new item if the key \p val is not found in the set. + Otherwise, the function returns an iterator that points to item found. + + Returns std::pair where \p first is an iterator pointing to + item found or inserted, \p second is true if new item has been added or \p false if the item + already is in the set. 
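+
+            A minimal sketch (assuming a set type \p nogc_foo_set over \p Foo as in the declaration
+            example in the class description; the names are illustrative):
+            \code
+            Foo v;
+            v.nKey = 5;
+            std::pair< nogc_foo_set::iterator, bool > res = theSet.ensure( v );
+            if ( res.second ) {
+                // a new item has been inserted
+            }
+            else {
+                // res.first points to the item that was already in the set
+            }
+            \endcode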
+ */ + template + std::pair ensure( const Q& val ) + { + bucket_type& refBucket = bucket( val ); + std::pair ret = refBucket.ensure( val ); + + if ( ret.first != refBucket.end() ) { + if ( ret.second ) + ++m_ItemCounter; + return std::make_pair( iterator( ret.first, &refBucket, m_Buckets + bucket_count() ), ret.second ); + } + + return std::make_pair( end(), ret.second ); + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_MichealSet_nogc_find + The function searches the item with key equal to \p key + and returns an iterator pointed to item found if the key is found, + and \ref end() otherwise + */ + template + iterator find( Q const& key ) + { + bucket_type& refBucket = bucket( key ); + bucket_iterator it = refBucket.find( key ); + if ( it != refBucket.end() ) + return iterator( it, &refBucket, m_Buckets + bucket_count() ); + + return end(); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichealSet_nogc_find "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + iterator find_with( Q const& key, Less pred ) + { + bucket_type& refBucket = bucket( key ); + bucket_iterator it = refBucket.find_with( key, pred ); + if ( it != refBucket.end() ) + return iterator( it, &refBucket, m_Buckets + bucket_count() ); + + return end(); + } + + + /// Clears the set (non-atomic, not thread-safe) + /** + The function deletes all items from the set. + The function is not atomic and even not thread-safe. + It cleans up each bucket and then resets the item counter to zero. + If there are a thread that performs insertion while \p clear is working the result is undefined in general case: + empty() may return \p true but the set may contain item(s). + */ + void clear() + { + for ( size_t i = 0; i < bucket_count(); ++i ) + m_Buckets[i].clear(); + m_ItemCounter.reset(); + } + + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, the correct item counting feature is an important part of Michael's set implementation. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + size_t size() const + { + return m_ItemCounter; + } + + /// Returns the size of hash table + /** + Since MichaelHashSet cannot dynamically extend the hash table size, + the value returned is an constant depending on object initialization parameters; + see MichaelHashSet::MichaelHashSet for explanation. + */ + size_t bucket_count() const + { + return m_nHashBitmask + 1; + } + }; + +}} // cds::container + +#endif // ifndef __CDS_CONTAINER_MICHAEL_SET_NOGC_H diff --git a/cds/container/michael_set_rcu.h b/cds/container/michael_set_rcu.h new file mode 100644 index 00000000..67d4effe --- /dev/null +++ b/cds/container/michael_set_rcu.h @@ -0,0 +1,754 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MICHAEL_SET_RCU_H +#define __CDS_CONTAINER_MICHAEL_SET_RCU_H + +#include +#include + +namespace cds { namespace container { + + /// Michael's hash set (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_nonintrusive_set + \anchor cds_nonintrusive_MichaelHashSet_rcu + + Source: + - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" + + Michael's hash table algorithm is based on lock-free ordered list and it is very simple. 
+ The main structure is an array \p T of size \p M. Each element in \p T is basically a pointer + to a hash bucket, implemented as a singly linked list. The array of buckets cannot be dynamically expanded. + However, each bucket may contain unbounded number of items. + + Template parameters are: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p OrderedList - ordered list implementation used as bucket for hash set, for example, MichaelList. + The ordered list implementation specifies the type \p T stored in the hash-set, + the comparison functor for the type \p T and other features specific for + the ordered list. + - \p Traits - type traits. See michael_set::type_traits for explanation. + + Instead of defining \p Traits struct you may use option-based syntax with michael_set::make_traits metafunction. + For michael_set::make_traits the following option may be used: + - opt::hash - mandatory option, specifies hash functor. + - opt::item_counter - optional, specifies item counting policy. See michael_set::type_traits for explanation. + - opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR. + + \note About hash functor see \ref cds_nonintrusive_MichaelHashSet_hash_functor "MichaelSet". + + How to use + + Suppose, we have the following type \p Foo that we want to store in our MichaelHashSet: + \code + struct Foo { + int nKey ; // key field + int nVal ; // value field + }; + \endcode + + To use \p %MichaelHashSet for \p Foo values, you should first choose suitable ordered list class + that will be used as a bucket for the set. We will cds::urcu::general_buffered<> RCU type and + MichaelList as a bucket type. + You should include RCU-related header file (cds/urcu/general_buffered.h in this example) + before including cds/container/michael_set_rcu.h. + Also, for ordered list we should develop a comparator for our \p Foo struct. + \code + #include + #include + #include + + namespace cc = cds::container; + + // Foo comparator + struct Foo_cmp { + int operator ()(Foo const& v1, Foo const& v2 ) const + { + if ( std::less( v1.nKey, v2.nKey )) + return -1; + return std::less(v2.nKey, v1.nKey) ? 1 : 0; + } + }; + + // Our ordered list + typedef cc::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, Foo, + typename cc::michael_list::make_traits< + cc::opt::compare< Foo_cmp > // item comparator option + >::type + > bucket_list; + + // Hash functor for Foo + struct foo_hash { + size_t operator ()( int i ) const + { + return std::hash( i ); + } + size_t operator()( Foo const& i ) const + { + return std::hash( i.nKey ); + } + }; + + // Declare set type. + // Note that \p RCU template parameter of ordered list must be equal \p RCU for the set. 
+ typedef cc::MichaelHashSet< cds::urcu::gc< cds::urcu::general_buffered<> >, bucket_list, + cc::michael_set::make_traits< + cc::opt::hash< foo_hash > + >::type + > foo_set; + + // Set variable + foo_set fooSet; + \endcode + */ + template < + class RCU, + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_set::type_traits +#else + class Traits +#endif + > + class MichaelHashSet< cds::urcu::gc< RCU >, OrderedList, Traits > + { + public: + typedef OrderedList bucket_type ; ///< type of ordered list used as a bucket implementation + typedef Traits options ; ///< Traits template parameters + + typedef typename bucket_type::value_type value_type ; ///< type of value stored in the list + typedef cds::urcu::gc< RCU > gc ; ///< RCU used as garbage collector + typedef typename bucket_type::key_comparator key_comparator ; ///< key comparing functor + + /// Hash functor for \ref value_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename options::hash >::type hash; + typedef typename options::item_counter item_counter ; ///< Item counter type + + /// Bucket table allocator + typedef cds::details::Allocator< bucket_type, typename options::allocator > bucket_table_allocator; + + typedef typename bucket_type::rcu_lock rcu_lock ; ///< RCU scoped lock + typedef typename bucket_type::exempt_ptr exempt_ptr ; ///< pointer to extracted node + /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that + static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = bucket_type::c_bExtractLockExternal; + + protected: + item_counter m_ItemCounter ; ///< Item counter + hash m_HashFunctor ; ///< Hash functor + + bucket_type * m_Buckets ; ///< bucket table + + private: + //@cond + const size_t m_nHashBitmask; + //@endcond + + protected: + /// Calculates hash value of \p key + template + size_t hash_value( Q const& key ) const + { + return m_HashFunctor( key ) & m_nHashBitmask; + } + + /// Returns the bucket (ordered list) for \p key + //@{ + template + bucket_type& bucket( Q const& key ) + { + return m_Buckets[ hash_value( key ) ]; + } + template + bucket_type const& bucket( Q const& key ) const + { + return m_Buckets[ hash_value( key ) ]; + } + //@} + public: + /// Forward iterator + /** + The forward iterator for Michael's set is based on \p OrderedList forward iterator and has some features: + - it has no post-increment operator + - it iterates items in unordered fashion + - The iterator cannot be moved across thread boundary since it may contain GC's guard that is thread-private GC data. + - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the set. + + Therefore, the use of iterators in concurrent environment is not good idea. Use the iterator for the concurrent container + for debug purpose only. 
+ */ + typedef michael_set::details::iterator< bucket_type, false > iterator; + + /// Const forward iterator + typedef michael_set::details::iterator< bucket_type, true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. + For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() ); + } + + /// Returns a forward const iterator addressing the first element in a set + //@{ + const_iterator begin() const + { + return get_const_begin(); + } + const_iterator cbegin() + { + return get_const_begin(); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a set + //@{ + const_iterator end() const + { + return get_const_end(); + } + const_iterator cend() + { + return get_const_end(); + } + //@} + + private: + //@cond + const_iterator get_const_begin() const + { + return const_iterator( const_cast(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count() ); + } + const_iterator get_const_end() const + { + return const_iterator( const_cast(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() ); + } + //@endcond + + public: + /// Initialize hash set + /** + The Michael's hash set is non-expandable container. You should point the average count of items \p nMaxItemCount + when you create an object. + \p nLoadFactor parameter defines average count of items per bucket and it should be small number between 1 and 10. + Remember, since the bucket implementation is an ordered list, searching in the bucket is linear [O(nLoadFactor)]. + Note, that many popular STL hash map implementation uses load factor 1. + + The ctor defines hash table size as rounding nMacItemCount / nLoadFactor up to nearest power of two. + */ + MichaelHashSet( + size_t nMaxItemCount, ///< estimation of max item count in the hash set + size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket + ) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) + { + // GC and OrderedList::gc must be the same + static_assert(( std::is_same::value ), "GC and OrderedList::gc must be the same"); + + // atomicity::empty_item_counter is not allowed as a item counter + static_assert(( !std::is_same::value ), "atomicity::empty_item_counter is not allowed as a item counter"); + + m_Buckets = bucket_table_allocator().NewArray( bucket_count() ); + } + + /// Clear hash set and destroy it + ~MichaelHashSet() + { + clear(); + bucket_table_allocator().Delete( m_Buckets, bucket_count() ); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain as minimum the complete key for the node. + The object of \ref value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + The function applies RCU lock internally. 
+ + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( Q const& val ) + { + const bool bRet = bucket( val ).insert( val ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-fields of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success. It may be passed by reference + using boost::ref + + The function applies RCU lock internally. + */ + template + bool insert( Q const& val, Func f ) + { + const bool bRet = bucket( val ).insert( val, f ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Ensures that the item exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p val key not found in the set, then the new item created from \p val + is inserted into the set. Otherwise, the functor \p func is called with the item found. + The functor \p Func should be a function with signature: + \code + void func( bool bNew, value_type& item, const Q& val ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p key passed into the \p ensure function + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + The function applies RCU lock internally. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the set. + */ + template + std::pair ensure( const Q& val, Func func ) + { + std::pair bRet = bucket( val ).ensure( val, func ); + if ( bRet.first && bRet.second ) + ++m_ItemCounter; + return bRet; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + The function applies RCU lock internally. + + @note This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + bool bRet = bucket( value_type(std::forward(args)...) ).emplace( std::forward(args)... ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } +# endif + + /// Deletes \p key from the set + /** \anchor cds_nonintrusive_MichealSet_rcu_erase_val + + Since the key of MichaelHashSet's item type \ref value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The set item comparator should be able to compare the type \p value_type + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. 
+ + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + const bool bRet = bucket( key ).erase( key ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichealSet_rcu_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred ) + { + const bool bRet = bucket( key ).erase_with( key, pred ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes \p key from the set + /** \anchor cds_nonintrusive_MichealSet_rcu_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type const& val); + }; + \endcode + The functor may be passed by reference using boost:ref + + Since the key of %MichaelHashSet's \p value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { + const bool bRet = bucket( key ).erase( key, f ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichealSet_rcu_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + const bool bRet = bucket( key ).erase_with( key, pred, f ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Extracts an item from the set + /** \anchor cds_nonintrusive_MichaelHashSet_rcu_extract + The function searches an item with key equal to \p val in the set, + unlinks it from the set, places item pointer into \p dest argument, and returns \p true. + If the item with the key equal to \p val is not found the function return \p false. + + @note The function does NOT call RCU read-side lock or synchronization, + and does NOT dispose the item found. It just excludes the item from the set + and returns a pointer to item found. + You should lock RCU before calling of the function, and you should synchronize RCU + outside the RCU lock to free extracted item + + \code + #include + #include + #include + + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::container::MichaelList< rcu, Foo > rcu_michael_list; + typedef cds::container::MichaelHashSet< rcu, rcu_michael_list, foo_traits > rcu_michael_set; + + rcu_michael_set theSet; + // ... + + rcu_michael_set::exempt_ptr p; + { + // first, we should lock RCU + rcu_michael_set::rcu_lock lock; + + // Now, you can apply extract function + // Note that you must not delete the item found inside the RCU lock + if ( theSet.extract( p, 10 )) { + // do something with p + ... 
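+                    // (for example, inspect the extracted item while p still owns it)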
+ } + } + + // We may safely release p here + // release() passes the pointer to RCU reclamation cycle + p.release(); + \endcode + */ + template + bool extract( exempt_ptr& dest, Q const& val ) + { + if ( bucket( val ).extract( dest, val )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Extracts an item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelHashSet_rcu_extract "extract(exempt_ptr&, Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool extract_with( exempt_ptr& dest, Q const& val, Less pred ) + { + if ( bucket( val ).extract_with( dest, val, pred )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Finds the key \p val + /** \anchor cds_nonintrusive_MichealSet_rcu_find_func + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \p value_type. + + The function applies RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) const + { + return bucket( val ).find( val, f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichealSet_rcu_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Less pred, Func f ) const + { + return bucket( val ).find_with( val, pred, f ); + } + + /// Finds the key \p val + /** \anchor cds_nonintrusive_MichealSet_rcu_find_cfunc + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. 
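For illustration, a minimal sketch of this functor-based \p find; \p Foo and the \p my_set typedef are the same illustrative assumptions used earlier:
\code
struct find_functor {
    void operator()( Foo& item, int const& key )
    {
        // The set guarantees only that item is not disposed while the functor runs;
        // serialize concurrent access to non-key fields yourself if such access is possible.
        item.nValue += 1;
    }
};

my_set s;
// ...
bool bFound = s.find( 10, find_functor());   // Q is int here; the set comparator
                                             // must be able to compare Foo and int
\endcode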
+ + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \p value_type. + + The function applies RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) const + { + return bucket( val ).find( val, f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichealSet_rcu_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred, Func f ) const + { + return bucket( val ).find_with( val, pred, f ); + } + + /// Finds the key \p val + /** \anchor cds_nonintrusive_MichealSet_rcu_find_val + + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \ref value_type. + */ + template + bool find( Q const & val ) const + { + return bucket( val ).find( val ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichealSet_rcu_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const & val, Less pred ) const + { + return bucket( val ).find_with( val, pred ); + } + + /// Finds the key \p val and return the item found + /** \anchor cds_nonintrusive_MichaelHashSet_rcu_get + The function searches the item with key equal to \p val and returns the pointer to item found. + If \p val is not found it returns \p NULL. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::container::MichaelHashSet< your_template_parameters > hash_set; + hash_set theSet; + // ... + { + // Lock RCU + hash_set::rcu_lock lock; + + foo * pVal = theSet.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + // Unlock RCU by rcu_lock destructor + // pVal can be freed at any time after RCU has been unlocked + } + \endcode + */ + template + value_type * get( Q const& val ) const + { + return bucket( val ).get( val ); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_nonintrusive_MichaelHashSet_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + value_type * get_with( Q const& val, Less pred ) const + { + return bucket( val ).get_with( val, pred ); + } + + /// Clears the set (non-atomic) + /** + The function erases all items from the set. + + The function is not atomic. It cleans up each bucket and then resets the item counter to zero. 
+ If there are a thread that performs insertion while \p clear is working the result is undefined in general case: + empty() may return \p true but the set may contain item(s). + Therefore, \p clear may be used only for debugging purposes. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + void clear() + { + for ( size_t i = 0; i < bucket_count(); ++i ) + m_Buckets[i].clear(); + m_ItemCounter.reset(); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, the correct item counting feature is an important part of Michael's set implementation. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + size_t size() const + { + return m_ItemCounter; + } + + /// Returns the size of hash table + /** + Since MichaelHashSet cannot dynamically extend the hash table size, + the value returned is an constant depending on object initialization parameters; + see MichaelHashSet::MichaelHashSet for explanation. + */ + size_t bucket_count() const + { + return m_nHashBitmask + 1; + } + }; + +}} // namespace cds::container + +#endif // ifndef __CDS_CONTAINER_MICHAEL_SET_H diff --git a/cds/container/moir_queue.h b/cds/container/moir_queue.h new file mode 100644 index 00000000..fba29773 --- /dev/null +++ b/cds/container/moir_queue.h @@ -0,0 +1,339 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MOIR_QUEUE_H +#define __CDS_CONTAINER_MOIR_QUEUE_H + +#include +#include +#include +#include +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + template + struct make_moir_queue + { + typedef GC gc; + typedef T value_type; + + struct default_options { + typedef cds::backoff::empty back_off; + typedef CDS_DEFAULT_ALLOCATOR allocator; + typedef atomicity::empty_item_counter item_counter; + typedef intrusive::queue_dummy_stat stat; + typedef opt::v::relaxed_ordering memory_model; + enum { alignment = opt::cache_line_alignment }; + }; + + typedef typename opt::make_options< + typename cds::opt::find_type_traits< default_options, CDS_OPTIONS7 >::type + ,CDS_OPTIONS7 + >::type options; + + struct node_type: public intrusive::single_link::node< gc > + { + value_type m_value; + + node_type( const value_type& val ) + : m_value( val ) + {} +# ifdef CDS_EMPLACE_SUPPORT + template + node_type( Args&&... args ) + : m_value( std::forward(args)...) + {} +# else + node_type() + {} +# endif + }; + + typedef typename options::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + + typedef intrusive::MoirQueue< + gc + ,node_type + ,intrusive::opt::hook< + intrusive::single_link::base_hook< opt::gc > + > + ,opt::back_off< typename options::back_off > + ,intrusive::opt::disposer< node_deallocator > + ,opt::item_counter< typename options::item_counter > + ,opt::stat< typename options::stat > + ,opt::alignment< options::alignment > + ,opt::memory_model< typename options::memory_model > + > type; + }; + } + //@endcond + + /// A variation of Michael & Scott's lock-free queue + /** @ingroup cds_nonintrusive_queue + It is non-intrusive version of intrusive::MoirQueue. + + \p T is a type stored in the queue. It should be default-constructible, copy-constructible, assignable type. 
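As a hedged illustration (not part of this header), a typical declaration might look as follows; the gc::HP collector, its header path and the option spelling are assumptions:
\code
#include <cds/gc/hp.h>                   // Hazard Pointer GC (assumed header path)
#include <cds/container/moir_queue.h>

typedef cds::container::MoirQueue<
    cds::gc::HP,                                               // garbage collector
    int,                                                       // value type
    cds::opt::item_counter< cds::atomicity::item_counter >     // enable item counting (assumed option spelling)
> int_queue;
\endcode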
+ + \p Options description see MSQueue + */ + template + class MoirQueue: +#ifdef CDS_DOXYGEN_INVOKED + intrusive::MoirQueue< GC, intrusive::single_link::node< T >, Options... > +#else + details::make_moir_queue< GC, T, CDS_OPTIONS7 >::type +#endif + { + //@cond + typedef details::make_moir_queue< GC, T, CDS_OPTIONS7 > options; + typedef typename options::type base_class; + //@endcond + + public: + /// Rebind template arguments + template + struct rebind { + typedef MoirQueue< GC2, T2, CDS_OTHER_OPTIONS7> other ; ///< Rebinding result + }; + + public: + typedef T value_type ; ///< Value type stored in the stack + + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename options::options::item_counter item_counter ; ///< Item counting policy used + typedef typename options::options::stat stat ; ///< Internal statistics policy used + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + + protected: + typedef typename options::node_type node_type ; ///< queue node type (derived from intrusive::single_link::node) + + //@cond + typedef typename options::cxx_allocator cxx_allocator; + typedef typename options::node_deallocator node_deallocator; // deallocate node + typedef typename base_class::node_traits node_traits; + //@endcond + + protected: + ///@cond + static node_type * alloc_node() + { + return cxx_allocator().New(); + } + static node_type * alloc_node( const value_type& val ) + { + return cxx_allocator().New( val ); + } +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node_move( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward( args )... ); + } +# endif + static void free_node( node_type * p ) + { + node_deallocator()( p ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + public: + /// Initializes empty queue + MoirQueue() + {} + + /// Destructor clears the queue + ~MoirQueue() + {} + + /// Returns queue's item count (see \ref intrusive::MSQueue::size for explanation) + size_t size() const + { + return base_class::size(); + } + + /// Returns refernce to internal statistics + const stat& statistics() const + { + return base_class::statistics(); + } + + /// Enqueues \p val value into the queue. + /** + The function makes queue node in dynamic memory calling copy constructor for \p val + and then it calls intrusive::MSQueue::enqueue. + Returns \p true if success, \p false otherwise. + */ + bool enqueue( value_type const& val ) + { + scoped_node_ptr p( alloc_node(val) ); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Enqueues \p data to queue using copy functor + /** + \p Func is a functor called to copy value \p data of type \p Type + which may be differ from type \p T stored in the queue. + The functor's interface is: + \code + struct myFunctor { + void operator()(T& dest, SOURCE const& data) + { + // // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + + Requirements The functor \p Func should not throw any exception. 
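For example (a sketch; \p int_queue is the typedef assumed in the declaration sketch above, and the source type is arbitrary):
\code
struct copy_from_pair {
    void operator()( int& dest, std::pair<int, int> const& src )
    {
        dest = src.first + src.second;   // build the stored value from a different source type
    }
};

int_queue q;
q.enqueue( std::make_pair( 1, 2 ), copy_from_pair());   // stores 3
\endcode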
+ */ + template + bool enqueue( const Type& data, Func f ) + { + scoped_node_ptr p( alloc_node()); + unref(f)( node_traits::to_value_ptr( *p )->m_value, data ); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Dequeues a value using copy functor + /** + \p Func is a functor called to copy dequeued value to \p dest of type \p Type + which may be differ from type \p T stored in the queue. + The functor's interface is: + \code + struct myFunctor { + void operator()(Type& dest, T const& data) + { + // // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + + Requirements The functor \p Func should not throw any exception. + */ + template + bool dequeue( Type& dest, Func f ) + { + typename base_class::dequeue_result res; + if ( base_class::do_dequeue( res )) { + unref(f)( dest, node_traits::to_value_ptr( *res.pNext )->m_value ); + + base_class::dispose_result( res ); + + return true; + } + return false; + } + + /// Dequeues a value from the queue + /** + If queue is not empty, the function returns \p true, \p dest contains copy of + dequeued value. The assignment operator for type \ref value_type is invoked. + If queue is empty, the function returns \p false, \p dest is unchanged. + */ + bool dequeue( value_type& dest ) + { + typedef cds::details::trivial_assign functor; + return dequeue( dest, functor() ); + } + + /// Synonym for \ref enqueue function + bool push( const value_type& val ) + { + return enqueue( val ); + } + + /// Synonym for template version of \ref enqueue function + template + bool push( const Type& data, Func f ) + { + return enqueue( data, f ); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Enqueues data of type \ref value_type constructed with std::forward(args)... + /** + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr p( alloc_node_move( std::forward(args)... )); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } +# endif + + + /// Synonym for \ref dequeue function + bool pop( value_type& dest ) + { + return dequeue( dest ); + } + + /// Synonym for template version of \ref dequeue function + template + bool pop( Type& dest, Func f ) + { + return dequeue( dest, f ); + } + + /// Checks if the queue is empty + bool empty() const + { + return base_class::empty(); + } + + /// Clear the queue + /** + The function repeatedly calls \ref dequeue until it returns NULL. + The disposer defined in template \p Options is called for each item + that can be safely disposed. 
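A short sketch tying the plain interface together (again assuming the \p int_queue typedef from above):
\code
int_queue q;
q.push( 1 );
q.push( 2 );

int v;
q.pop( v );      // v == 1 (FIFO order)

q.clear();       // dequeues the remaining items; the disposer frees each node
// q.empty() is now true
\endcode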
+ */ + void clear() + { + base_class::clear(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_MOIR_QUEUE_H + + diff --git a/cds/container/mspriority_queue.h b/cds/container/mspriority_queue.h new file mode 100644 index 00000000..6620f534 --- /dev/null +++ b/cds/container/mspriority_queue.h @@ -0,0 +1,337 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MSPRIORITY_QUEUE_H +#define __CDS_CONTAINER_MSPRIORITY_QUEUE_H + +#include +#include +#include + +namespace cds { namespace container { + + /// MSPriorityQueue related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace mspriority_queue { + +#ifdef CDS_DOXYGEN_INVOKED + /// Synonym for cds::intrusive::mspriority_queue::stat + typedef cds::intrusive::mspriority_queue::stat<> stat; + + /// Synonym for cds::intrusive::mspriority_queue::empty_stat + typedef cds::intrusive::mspriority_queue::empty_stat empty_stat; +#else + using cds::intrusive::mspriority_queue::stat; + using cds::intrusive::mspriority_queue::empty_stat; +#endif + + /// Type traits for MSPriorityQueue + /** + The type traits for cds::container::MSPriorityQueue is the same as for + cds::intrusive::MSPriorityQueue (see cds::intrusive::mspriority_queue::type_traits) + plus some additional properties. + */ + struct type_traits: public cds::intrusive::mspriority_queue::type_traits + { + /// The allocator use to allocate memory for values + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Move policy + /** + The move policy used in MSPriorityQueue::pop functions + to move item's value. + Default is opt::v::assignment_move_policy. + */ + typedef cds::opt::v::assignment_move_policy move_policy; + }; + + /// Metafunction converting option list to traits + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + + See \ref MSPriorityQueue, \ref type_traits, \ref cds::opt::make_options. + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS9 >::type + ,CDS_OPTIONS9 + >::type type; +# endif + }; + } // namespace mspriority_queue + + /// Michael & Scott array-based lock-based concurrent priority queue heap + /** @ingroup cds_nonintrusive_priority_queue + Source: + - [1996] G.Hunt, M.Michael, S. Parthasarathy, M.Scott + "An efficient algorithm for concurrent priority queue heaps" + + \p %MSPriorityQueue augments the standard array-based heap data structure with + a mutual-exclusion lock on the heap's size and locks on each node in the heap. + Each node also has a tag that indicates whether + it is empty, valid, or in a transient state due to an update to the heap + by an inserting thread. + The algorithm allows concurrent insertions and deletions in opposite directions, + without risking deadlock and without the need for special server threads. + It also uses a "bit-reversal" technique to scatter accesses across the fringe + of the tree to reduce contention. + On large heaps the algorithm achieves significant performance improvements + over serialized single-lock algorithm, for various insertion/deletion + workloads. For small heaps it still performs well, but not as well as + single-lock algorithm. + + Template parameters: + - \p T - type to be stored in the list. The priority is a part of \p T type. + - \p Traits - type traits. See mspriority_queue::type_traits for explanation. 
+ + It is possible to declare option-based queue with cds::container::mspriority_queue::make_traits + metafunction instead of \p Traits template argument. + Template argument list \p Options of \p %cds::container::mspriority_queue::make_traits metafunction are: + - opt::buffer - the buffer type for heap array. Possible type are: opt::v::static_buffer, opt::v::dynamic_buffer. + Default is \p %opt::v::dynamic_buffer. + You may specify any type of values for the buffer since at instantiation time + the \p buffer::rebind member metafunction is called to change the type of values stored in the buffer. + - opt::compare - priority compare functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for priority compare. Default is \p std::less. + - opt::lock_type - lock type. Default is cds::lock::Spin. + - opt::back_off - back-off strategy. Default is cds::backoff::yield + - opt::allocator - allocator (like \p std::allocator) for the values of queue's items. + Default is \ref CDS_DEFAULT_ALLOCATOR + - opt::move_policy - policy for moving item's value. Default is opt::v::assignment_move_policy. + If the compiler supports move semantics it would be better to specify the move policy + based on the move semantics for type \p T. + - opt::stat - internal statistics. Available types: mspriority_queue::stat, mspriority_queue::empty_stat (the default) + */ + template + class MSPriorityQueue: protected cds::intrusive::MSPriorityQueue< T, Traits > + { + //@cond + typedef cds::intrusive::MSPriorityQueue< T, Traits > base_class; + //@endcond + public: + typedef T value_type ; ///< Value type stored in the queue + typedef Traits traits ; ///< Traits template parameter + + typedef typename base_class::key_comparator key_comparator; ///< priority comparing functor based on opt::compare and opt::less option setter. + typedef typename base_class::lock_type lock_type; ///< heap's size lock type + typedef typename base_class::back_off back_off ; ///< Back-off strategy + typedef typename base_class::stat stat ; ///< internal statistics type + typedef typename traits::allocator::template rebind::other allocator_type; ///< Value allocator + typedef typename traits::move_policy move_policy; ///< Move policy for type \p T + + protected: + //@cond + typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator; + + struct value_deleter { + void operator()( value_type * p ) const + { + cxx_allocator().Delete( p ); + } +# ifndef CDS_CXX11_LAMBDA_SUPPORT + void operator()( value_type& p ) const + { + cxx_allocator().Delete( &p ); + } +# endif + }; + typedef std::unique_ptr scoped_ptr; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + template + struct clear_wrapper + { + Func& func; + clear_wrapper( Func& f ): func(f) {} + + void operator()( value_type& src ) const + { + cds::unref(func)( src ); + value_deleter()( &src ); + } + }; +# endif + + //@endcond + + public: + /// Constructs empty priority queue + /** + For cds::opt::v::static_buffer the \p nCapacity parameter is ignored. + */ + MSPriorityQueue( size_t nCapacity ) + : base_class( nCapacity ) + {} + + /// Clears priority queue and destructs the object + ~MSPriorityQueue() + { + clear(); + } + + /// Inserts a item into priority queue + /** + If the priority queue is full, the function returns \p false, + no item has been added. + Otherwise, the function inserts the copy of \p val into the heap + and returns \p true. 
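A hedged declaration sketch using \p %make_traits; the particular \p opt::buffer and \p opt::less arguments are illustrative assumptions, and the priority order is whatever the chosen comparator defines:
\code
#include <cds/container/mspriority_queue.h>

typedef cds::container::MSPriorityQueue< int,
    cds::container::mspriority_queue::make_traits<
        cds::opt::buffer< cds::opt::v::dynamic_buffer< char > >,  // heap array storage (rebound to the value type internally)
        cds::opt::less< std::less<int> >                          // priority predicate
    >::type
> int_pqueue;

int_pqueue pq( 1024 );    // capacity of the underlying heap array
pq.push( 42 );

int top;
if ( pq.pop( top )) {
    // top holds the highest-priority item according to the comparator
}
\endcode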
+ + The function use copy constructor to create new heap item from \p val. + */ + bool push( value_type const& val ) + { + scoped_ptr pVal( cxx_allocator().New( val )); + if ( base_class::push( *(pVal.get()) )) { + pVal.release(); + return true; + } + return false; + } + +#ifdef CDS_EMPLACE_SUPPORT + /// Inserts a item into priority queue + /** + If the priority queue is full, the function returns \p false, + no item has been added. + Otherwise, the function inserts a new item created from \p args arguments + into the heap and returns \p true. + + The function is available only for compilers supporting variable template + and move semantics C++11 feature. + */ + template + bool emplace( Args&&... args ) + { + scoped_ptr pVal( cxx_allocator().MoveNew( std::forward(args)... )); + if ( base_class::push( *(pVal.get()) )) { + pVal.release(); + return true; + } + return false; + } +#endif + + /// Extracts item with high priority + /** + If the priority queue is empty, the function returns \p false. + Otherwise, it returns \p true and \p dest contains the copy of extracted item. + The item is deleted from the heap. + + The function uses \ref move_policy to move extracted value from the heap's top + to \p dest. + + The function is equivalent of such call: + \code + pop_with( dest, move_policy() ); + \endcode + */ + bool pop( value_type& dest ) + { + return pop_with( dest, move_policy() ); + } + + /// Extracts item with high priority + /** + If the priority queue is empty, the function returns \p false. + Otherwise, it returns \p true and \p dest contains the copy of extracted item. + The item is deleted from the heap. + + The function uses \p MoveFunc \p f to move extracted value from the heap's top + to \p dest. The interface of \p MoveFunc is: + \code + struct move_functor { + void operator()( Q& dest, T& src ); + }; + \endcode + In \p MoveFunc you may use move semantics for \p src argument + since \p src will be destroyed. + */ + template + bool pop_with( Q& dest, MoveFunc f ) + { + value_type * pVal = base_class::pop(); + if ( pVal ) { + cds::unref(f)( dest, *pVal ); + cxx_allocator().Delete( pVal ); + return true; + } + return false; + } + + /// Clears the queue (not atomic) + /** + This function is no atomic, but thread-safe + */ + void clear() + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + base_class::clear_with( []( value_type& src ) { cxx_allocator().Delete( &src ); }); +# else + base_class::clear_with( value_deleter() ); +# endif + } + + /// Clears the queue (not atomic) + /** + This function is no atomic, but thread-safe. + + For each item removed the functor \p f is called. + \p Func interface is: + \code + struct clear_functor + { + void operator()( value_type& item ); + }; + \endcode + A lambda function or a function pointer can be used as \p f. 
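For instance (a sketch using the \p int_pqueue typedef assumed above):
\code
struct on_cleared {
    void operator()( int& val )
    {
        // inspect val here; it is deleted immediately after the functor returns
    }
};

int_pqueue pq( 16 );
pq.push( 3 );
pq.push( 8 );

pq.clear_with( on_cleared());   // the functor is invoked once per remaining item
\endcode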
+ */ + template + void clear_with( Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + base_class::clear_with( [&f]( value_type& val ) { cds::unref(f)(val); value_deleter()( &val ); } ); +# else + clear_wrapper c(f); + base_class::clear_with( cds::ref(c)); +# endif + } + + /// Checks is the priority queue is empty + bool empty() const + { + return base_class::empty(); + } + + /// Checks if the priority queue is full + bool full() const + { + return base_class::full(); + } + + /// Returns current size of priority queue + size_t size() const + { + return base_class::size(); + } + + /// Return capacity of the priority queue + size_t capacity() const + { + return base_class::capacity(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_MSPRIORITY_QUEUE_H diff --git a/cds/container/msqueue.h b/cds/container/msqueue.h new file mode 100644 index 00000000..29c33553 --- /dev/null +++ b/cds/container/msqueue.h @@ -0,0 +1,345 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_MSQUEUE_H +#define __CDS_CONTAINER_MSQUEUE_H + +#include +#include +#include +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + template + struct make_msqueue + { + typedef GC gc; + typedef T value_type; + + struct default_options { + typedef cds::backoff::empty back_off; + typedef CDS_DEFAULT_ALLOCATOR allocator; + typedef atomicity::empty_item_counter item_counter; + typedef intrusive::queue_dummy_stat stat; + typedef opt::v::relaxed_ordering memory_model; + enum { alignment = opt::cache_line_alignment }; + }; + + typedef typename opt::make_options< + typename cds::opt::find_type_traits< default_options, CDS_OPTIONS7 >::type + ,CDS_OPTIONS7 + >::type options; + + struct node_type: public intrusive::single_link::node< gc > + { + value_type m_value; + node_type( const value_type& val ) + : m_value( val ) + {} +# ifdef CDS_EMPLACE_SUPPORT + template + node_type( Args&&... args ) + : m_value( std::forward(args)...) + {} +# else + node_type() + {} +# endif + }; + + typedef typename options::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + + typedef intrusive::MSQueue< gc, + node_type + ,intrusive::opt::hook< + intrusive::single_link::base_hook< opt::gc > + > + ,opt::back_off< typename options::back_off > + ,intrusive::opt::disposer< node_deallocator > + ,opt::item_counter< typename options::item_counter > + ,opt::stat< typename options::stat > + ,opt::alignment< options::alignment > + ,opt::memory_model< typename options::memory_model > + > type; + }; + } + //@endcond + + /// Michael & Scott lock-free queue + /** @ingroup cds_nonintrusive_queue + It is non-intrusive version of Michael & Scott's queue algorithm based on intrusive implementation + intrusive::MSQueue. + + Template arguments: + - \p GC - garbage collector type: gc::HP, gc::HRC, gc::PTB + - \p T is a type stored in the queue. It should be default-constructible, copy-constructible, assignable type. + - \p Options - options + + Permissible \p Options: + - opt::allocator - allocator (like \p std::allocator). Default is \ref CDS_DEFAULT_ALLOCATOR + - opt::back_off - back-off strategy used. 
If the option is not specified, the cds::backoff::empty is used + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter + - opt::stat - the type to gather internal statistics. + Possible option value are: intrusive::queue_stat, intrusive::queue_dummy_stat, + user-provided class that supports intrusive::queue_stat interface. + Default is \ref intrusive::queue_dummy_stat. + - opt::alignment - the alignment for internal queue data. Default is opt::cache_line_alignment + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + */ + template + class MSQueue: +#ifdef CDS_DOXYGEN_INVOKED + intrusive::MSQueue< GC, intrusive::single_link::node< T >, Options... > +#else + details::make_msqueue< GC, T, CDS_OPTIONS7 >::type +#endif + { + //@cond + typedef details::make_msqueue< GC, T, CDS_OPTIONS7 > options; + typedef typename options::type base_class; + //@endcond + + public: + /// Rebind template arguments + template + struct rebind { + typedef MSQueue< GC2, T2, CDS_OTHER_OPTIONS7> other ; ///< Rebinding result + }; + + public: + typedef T value_type ; ///< Value type stored in the queue + + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename base_class::stat stat ; ///< Internal statistics policy used + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + + protected: + typedef typename options::node_type node_type ; ///< queue node type (derived from intrusive::single_link::node) + + //@cond + typedef typename options::cxx_allocator cxx_allocator; + typedef typename options::node_deallocator node_deallocator; // deallocate node + typedef typename base_class::node_traits node_traits; + //@endcond + + protected: + ///@cond + static node_type * alloc_node() + { + return cxx_allocator().New(); + } + static node_type * alloc_node( value_type const& val ) + { + return cxx_allocator().New( val ); + } +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node_move( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward( args )... ); + } +# endif + static void free_node( node_type * p ) + { + node_deallocator()( p ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + public: + /// Initializes empty queue + MSQueue() + {} + + /// Destructor clears the queue + ~MSQueue() + {} + + /// Returns queue's item count (see \ref intrusive::MSQueue::size for explanation) + size_t size() const + { + return base_class::size(); + } + + /// Returns reference to internal statistics + const stat& statistics() const + { + return base_class::statistics(); + } + + /// Enqueues \p val value into the queue. + /** + The function makes queue node in dynamic memory calling copy constructor for \p val + and then it calls intrusive::MSQueue::enqueue. + Returns \p true if success, \p false otherwise. 
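A minimal end-to-end sketch (the gc::HP collector and its header path are assumptions; any collector listed above can be substituted):
\code
#include <string>
#include <cds/gc/hp.h>                 // Hazard Pointer GC (assumed header path)
#include <cds/container/msqueue.h>

typedef cds::container::MSQueue< cds::gc::HP, std::string > string_queue;

string_queue q;
q.enqueue( std::string( "hello" ));

std::string s;
if ( q.dequeue( s )) {
    // s == "hello"
}
\endcode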
+ */ + bool enqueue( value_type const& val ) + { + scoped_node_ptr p( alloc_node(val) ); + if ( base_class::enqueue( *p ) ) { + p.release(); + return true; + } + return false; + } + + /// Enqueues \p data to queue using copy functor + /** + \p Func is a functor called to copy value \p data of type \p Type + which may be differ from type \ref value_type stored in the queue. + The functor's interface is: + \code + struct myFunctor { + void operator()(value_type& dest, Type const& data) + { + // // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + + Requirements The functor \p Func should not throw any exception. + */ + template + bool enqueue( Type const& data, Func f ) + { + scoped_node_ptr p( alloc_node() ); + unref(f)( p->m_value, data ); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Dequeues a value using copy functor + /** + \p Func is a functor called to copy dequeued value to \p dest of type \p Type + which may be differ from type \ref value_type stored in the queue. + The functor's interface is: + \code + struct myFunctor { + void operator()(Type& dest, value_type const& data) + { + // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + + Requirements The functor \p Func should not throw any exception. + */ + template + bool dequeue( Type& dest, Func f ) + { + typename base_class::dequeue_result res; + if ( base_class::do_dequeue( res )) { + unref(f)( dest, node_traits::to_value_ptr( *res.pNext )->m_value ); + + base_class::dispose_result( res ); + + return true; + } + return false; + } + + /// Dequeues a value from the queue + /** + If queue is not empty, the function returns \p true, \p dest contains copy of + dequeued value. The assignment operator for type \ref value_type is invoked. + If queue is empty, the function returns \p false, \p dest is unchanged. + */ + bool dequeue( value_type& dest ) + { + typedef cds::details::trivial_assign functor; + return dequeue( dest, functor() ); + } + + /// Synonym for \ref enqueue function + bool push( value_type const& val ) + { + return enqueue( val ); + } + + /// Synonym for template version of \ref enqueue function + template + bool push( Type const& data, Func f ) + { + return enqueue( data, f ); + } + + /// Synonym for \ref dequeue function + bool pop( value_type& dest ) + { + return dequeue( dest ); + } + + /// Synonym for template version of \ref dequeue function + template + bool pop( Type& dest, Func f ) + { + return dequeue( dest, f ); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Enqueues data of type \ref value_type constructed with std::forward(args)... + /** + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr p( alloc_node_move( std::forward(args)... ) ); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } +# endif + + /// Checks if the queue is empty + bool empty() const + { + return base_class::empty(); + } + + /// Clear the queue + /** + The function repeatedly calls \ref dequeue until it returns NULL. 
+ */ + void clear() + { + base_class::clear(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_MSQUEUE_H diff --git a/cds/container/optimistic_queue.h b/cds/container/optimistic_queue.h new file mode 100644 index 00000000..ca92861a --- /dev/null +++ b/cds/container/optimistic_queue.h @@ -0,0 +1,354 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_OPTIMISTIC_QUEUE_H +#define __CDS_CONTAINER_OPTIMISTIC_QUEUE_H + +#include +#include +#include +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + template + struct make_optimistic_queue + { + typedef GC gc; + typedef T value_type; + + struct default_options { + typedef cds::backoff::empty back_off; + typedef CDS_DEFAULT_ALLOCATOR allocator; + typedef atomicity::empty_item_counter item_counter; + typedef intrusive::optimistic_queue::dummy_stat stat; + typedef opt::v::relaxed_ordering memory_model; + enum { alignment = opt::cache_line_alignment }; + }; + + typedef typename opt::make_options< + typename cds::opt::find_type_traits< default_options, CDS_OPTIONS7 >::type + ,CDS_OPTIONS7 + >::type options; + + struct node_type: public intrusive::optimistic_queue::node< gc > + { + value_type m_value; + + node_type( value_type const& val ) + : m_value( val ) + {} +# ifdef CDS_EMPLACE_SUPPORT + template + node_type( Args&&... args ) + : m_value( std::forward(args)...) + {} +# else + node_type() + {} +# endif + }; + + typedef typename options::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + + typedef intrusive::OptimisticQueue< gc, + node_type + ,intrusive::opt::hook< + intrusive::optimistic_queue::base_hook< opt::gc > + > + ,opt::back_off< typename options::back_off > + ,intrusive::opt::disposer< node_deallocator > + ,opt::item_counter< typename options::item_counter > + ,opt::stat< typename options::stat > + ,opt::alignment< options::alignment > + ,opt::memory_model< typename options::memory_model > + > type; + }; + } // namespace details + //@endcond + + /// Optimistic queue + /** @ingroup cds_nonintrusive_queue + Implementation of Ladan-Mozes & Shavit optimistic queue algorithm. + + \par Source: + [2008] Edya Ladan-Mozes, Nir Shavit "An Optimistic Approach to Lock-Free FIFO Queues" + + Template arguments: + - \p GC - garbage collector type: gc::HP, gc::PTB. Note that gc::HRC is not supported + - \p T - type to be stored in the queue + - \p Options - options + + \p Options are: + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. + - opt::allocator - allocator (like \p std::allocator) used for nodes allocation. Default is \ref CDS_DEFAULT_ALLOCATOR + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter + - opt::stat - the type to gather internal statistics for debugging and profiling purposes. + Possible option value are: intrusive::optimistic_queue::stat, intrusive::optimistic_queue::dummy_stat (the default), + user-provided class that supports intrusive::optimistic_queue::stat interface. + Generic option intrusive::queue_stat and intrusive::queue_dummy_stat are acceptable too, however, + they will be automatically converted to intrusive::optimistic_queue::stat and intrusive::optimistic_queue::dummy_stat + respectively. 
+ - opt::alignment - the alignment for internal queue data. Default is opt::cache_line_alignment + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + + Warning gc::HRC is not supported for this implementation. + */ + template + class OptimisticQueue: +#ifdef CDS_DOXYGEN_INVOKED + intrusive::OptimisticQueue< GC, intrusive::optimistic_queue::node< T >, Options... > +#else + details::make_optimistic_queue< GC, T, CDS_OPTIONS7 >::type +#endif + { + //@cond + typedef details::make_optimistic_queue< GC, T, CDS_OPTIONS7 > options; + typedef typename options::type base_class; + //@endcond + + public: + /// Rebind template arguments + template + struct rebind { + typedef OptimisticQueue< GC2, T2, CDS_OTHER_OPTIONS7> other ; ///< Rebinding result + }; + + public: + typedef T value_type ; ///< Value type stored in the stack + + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename options::options::item_counter item_counter ; ///< Item counting policy used + typedef typename options::options::stat stat ; ///< Internal statistics policy used + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + + static CDS_CONSTEXPR_CONST size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm + + protected: + typedef typename options::node_type node_type ; ///< queue node type (derived from intrusive::optimistic_queue::node) + + //@cond + typedef typename options::cxx_allocator cxx_allocator; + typedef typename options::node_deallocator node_deallocator; // deallocate node + typedef typename base_class::node_traits node_traits; + //@endcond + + protected: + ///@cond + static node_type * alloc_node() + { + return cxx_allocator().New(); + } + static node_type * alloc_node( const value_type& val ) + { + return cxx_allocator().New( val ); + } +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node_move( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward( args )... ); + } +# endif + static void free_node( node_type * p ) + { + node_deallocator()( p ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + public: + /// Initializes empty queue + OptimisticQueue() + {} + + /// Destructor clears the queue + ~OptimisticQueue() + {} + + /// Returns queue's item count (see \ref intrusive::OptimisticQueue::size for explanation) + size_t size() const + { + return base_class::size(); + } + + /// Returns reference to internal statistics + const stat& statistics() const + { + return base_class::statistics(); + } + + /// Enqueues \p val value into the queue. + /** + The function makes queue node in dynamic memory calling copy constructor for \p val + and then it calls intrusive::OptimisticQueue::enqueue. + Returns \p true if success, \p false otherwise. 
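A declaration sketch (remember that gc::HRC is not supported here; gc::HP and its header path below are assumptions):
\code
#include <cds/gc/hp.h>                         // Hazard Pointer GC (assumed header path)
#include <cds/container/optimistic_queue.h>

typedef cds::container::OptimisticQueue< cds::gc::HP, int > opt_queue;

opt_queue q;
q.enqueue( 10 );

int v;
bool ok = q.dequeue( v );   // ok == true, v == 10
\endcode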
+ */ + bool enqueue( const value_type& val ) + { + scoped_node_ptr p( alloc_node(val)); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Enqueues \p data to queue using copy functor + /** + \p Func is a functor called to copy value \p data of type \p Type + which may be differ from type \p T stored in the queue. + The functor's interface is: + \code + struct myFunctor { + void operator()(T& dest, SOURCE const& data) + { + // // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + + Requirements The functor \p Func should not throw any exception. + */ + template + bool enqueue( const Type& data, Func f ) + { + scoped_node_ptr p( alloc_node() ); + unref(f)( p->m_value, data ); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Enqueues data of type \ref value_type constructed with std::forward(args)... + /** + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr p( alloc_node_move( std::forward(args)... )); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } +# endif + + /// Dequeues a value using copy functor + /** + \p Func is a functor called to copy dequeued value to \p dest of type \p Type + which may be differ from type \p T stored in the queue. + The functor's interface is: + \code + struct myFunctor { + void operator()(Type& dest, T const& data) + { + // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + + Requirements The functor \p Func should not throw any exception. + */ + template + bool dequeue( Type& dest, Func f ) + { + typename base_class::dequeue_result res; + if ( base_class::do_dequeue( res )) { + unref(f)( dest, node_traits::to_value_ptr( *res.pNext )->m_value ); + + base_class::dispose_result( res ); + + return true; + } + return false; + } + + /// Dequeues a value from the queue + /** + If queue is not empty, the function returns \p true, \p dest contains copy of + dequeued value. The assignment operator for type \ref value_type is invoked. + If queue is empty, the function returns \p false, \p dest is unchanged. + */ + bool dequeue( value_type& dest ) + { + typedef cds::details::trivial_assign functor; + return dequeue( dest, functor() ); + } + + /// Synonym for \ref enqueue function + bool push( const value_type& val ) + { + return enqueue( val ); + } + + /// Synonym for template version of \ref enqueue function + template + bool push( const Type& data, Func f ) + { + return enqueue( data, f ); + } + + /// Synonym for \ref dequeue function + bool pop( value_type& dest ) + { + return dequeue( dest ); + } + + /// Synonym for template version of \ref dequeue function + template + bool pop( Type& dest, Func f ) + { + return dequeue( dest, f ); + } + + /// Checks if the queue is empty + bool empty() const + { + return base_class::empty(); + } + + /// Clear the queue + /** + The function repeatedly calls \ref dequeue until it returns NULL. 
+ */ + void clear() + { + base_class::clear(); + } + }; + +}} // namespace cds::container + +#endif //#ifndef __CDS_CONTAINER_OPTIMISTIC_QUEUE_H diff --git a/cds/container/rwqueue.h b/cds/container/rwqueue.h new file mode 100644 index 00000000..7533682e --- /dev/null +++ b/cds/container/rwqueue.h @@ -0,0 +1,362 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_RWQUEUE_H +#define __CDS_CONTAINER_RWQUEUE_H + +#include +#include +#include +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Michael & Scott blocking queue with fine-grained synchronization schema + /** @ingroup cds_nonintrusive_queue + The queue has two different locks: one for reading and one for writing. + Therefore, one writer and one reader can simultaneously access to the queue. + The queue does not require any garbage collector. + + Source + - [1998] Maged Michael, Michael Scott "Simple, fast, and practical non-blocking + and blocking concurrent queue algorithms" + + Template arguments + - \p T - type to be stored in the queue + - \p Options - options + + \p Options are: + - opt::allocator - allocator (like \p std::allocator). Default is \ref CDS_DEFAULT_ALLOCATOR + - opt::lock_type - type of lock primitive. Default is cds::lock::Spin. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter + - opt::stat - the type to gather internal statistics. + Possible option value are: queue_stat, queue_dummy_stat, user-provided class that supports queue_stat interface. + Default is \ref intrusive::queue_dummy_stat. + RWQueue uses only \p onEnqueue and \p onDequeue counter. + - opt::alignment - the alignment for \p lock_type to prevent false sharing. Default is opt::cache_line_alignment + + This queue has no intrusive counterpart. + */ + template + class RWQueue + { + //@cond + struct default_options + { + typedef lock::Spin lock_type; + typedef CDS_DEFAULT_ALLOCATOR allocator; + typedef atomicity::empty_item_counter item_counter; + typedef intrusive::queue_dummy_stat stat; + enum { alignment = opt::cache_line_alignment }; + }; + //@endcond + + public: + //@cond + typedef typename opt::make_options< + typename cds::opt::find_type_traits< default_options, CDS_OPTIONS6 >::type + ,CDS_OPTIONS6 + >::type options; + //@endcond + + public: + /// Rebind template arguments + template + struct rebind { + typedef RWQueue< T2, CDS_OTHER_OPTIONS6> other ; ///< Rebinding result + }; + + public: + typedef T value_type ; ///< type of value stored in the queue + + typedef typename options::lock_type lock_type ; ///< Locking primitive used + + protected: + //@cond + /// Node type + struct node_type + { + node_type * volatile m_pNext ; ///< Pointer to the next node in queue + value_type m_value ; ///< Value stored in the node + + node_type( value_type const& v ) + : m_pNext(null_ptr()) + , m_value(v) + {} + + node_type() + : m_pNext( null_ptr() ) + {} + +# ifdef CDS_EMPLACE_SUPPORT + template + node_type( Args&&... args ) + : m_pNext(null_ptr()) + , m_value( std::forward(args)...) 
+ {} +# endif + }; + //@endcond + + public: + typedef typename options::allocator::template rebind::other allocator_type ; ///< Allocator type used for allocate/deallocate the queue nodes + typedef typename options::item_counter item_counter ; ///< Item counting policy used + typedef typename options::stat stat ; ///< Internal statistics policy used + + protected: + //@cond + typedef typename opt::details::alignment_setter< lock_type, options::alignment >::type aligned_lock_type; + typedef cds::lock::scoped_lock auto_lock; + typedef cds::details::Allocator< node_type, allocator_type > node_allocator; + + item_counter m_ItemCounter; + stat m_Stat; + + mutable aligned_lock_type m_HeadLock; + node_type * m_pHead; + mutable aligned_lock_type m_TailLock; + node_type * m_pTail; + //@endcond + + protected: + //@cond + static node_type * alloc_node() + { + return node_allocator().New(); + } + + static node_type * alloc_node( T const& data ) + { + return node_allocator().New( data ); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node_move( Args&&... args ) + { + return node_allocator().MoveNew( std::forward( args )... ); + } +# endif + + static void free_node( node_type * pNode ) + { + node_allocator().Delete( pNode ); + } + + bool enqueue_node( node_type * p ) + { + assert( p != null_ptr()); + { + auto_lock lock( m_TailLock ); + m_pTail = + m_pTail->m_pNext = p; + } + ++m_ItemCounter; + m_Stat.onEnqueue(); + return true; + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + public: + /// Makes empty queue + RWQueue() + { + node_type * pNode = alloc_node(); + m_pHead = + m_pTail = pNode; + } + + /// Destructor clears queue + ~RWQueue() + { + clear(); + assert( m_pHead == m_pTail ); + free_node( m_pHead ); + } + + /// Enqueues \p data. Always return \a true + bool enqueue( value_type const& data ) + { + scoped_node_ptr p( alloc_node( data )); + if ( enqueue_node( p.get() )) { + p.release(); + return true; + } + return false; + } + + /// Enqueues \p data to queue using copy functor + /** + \p Func is a functor called to copy value \p data of type \p Type + which may be differ from type \p T stored in the queue. + The functor's interface is: + \code + struct myFunctor { + void operator()(T& dest, Type const& data) + { + // // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + + Requirements The functor \p Func should not throw any exception. + */ + template + bool enqueue( Type const& data, Func f ) + { + scoped_node_ptr p( alloc_node()); + unref(f)( p->m_value, data ); + if ( enqueue_node( p.get() )) { + p.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Enqueues data of type \ref value_type constructed with std::forward(args)... + /** + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr p( alloc_node_move( std::forward(args)... )); + if ( enqueue_node( p.get() )) { + p.release(); + return true; + } + return false; + } +# endif + + /// Dequeues a value using copy functor + /** + \p Func is a functor called to copy dequeued value to \p dest of type \p Type + which may be differ from type \p T stored in the queue. 
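Since \p %RWQueue needs no garbage collector, a complete usage sketch is short (the value type below is an illustrative assumption):
\code
#include <cds/container/rwqueue.h>

cds::container::RWQueue< int > q;

q.push( 1 );            // producers contend only on the tail lock
q.push( 2 );

int v;
while ( q.pop( v )) {   // consumers contend only on the head lock
    // process v: 1, then 2
}
\endcode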
+ The functor's interface is: + \code + struct myFunctor { + void operator()(Type& dest, T const& data) + { + // // Copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + + Requirements The functor \p Func should not throw any exception. + */ + template + bool dequeue( Type& dest, Func f ) + { + node_type * pNode; + { + auto_lock lock( m_HeadLock ); + pNode = m_pHead; + node_type * pNewHead = pNode->m_pNext; + if ( pNewHead == null_ptr() ) + return false; + unref(f)( dest, pNewHead->m_value ); + m_pHead = pNewHead; + } // unlock here + --m_ItemCounter; + free_node( pNode ); + m_Stat.onDequeue(); + return true; + } + + /** Dequeues a value to \p dest. + + If queue is empty returns \a false, \p dest may be corrupted. + If queue is not empty returns \a true, \p dest contains the value dequeued + */ + bool dequeue( value_type& dest ) + { + typedef cds::details::trivial_assign functor; + return dequeue( dest, functor() ); + } + + /// Synonym for \ref enqueue + bool push( value_type const& data ) + { + return enqueue( data ); + } + + /// Synonym for template version of \ref enqueue function + template + bool push( Type const& data, Func f ) + { + return enqueue( data, f ); + } + + /// Synonym for \ref dequeue + bool pop( value_type& data ) + { + return dequeue( data ); + } + + /// Synonym for template version of \ref dequeue function + template + bool pop( Type& dest, Func f ) + { + return dequeue( dest, f ); + } + + /// Checks if queue is empty + bool empty() const + { + auto_lock lock( m_HeadLock ); + return m_pHead->m_pNext == null_ptr(); + } + + /// Clears queue + void clear() + { + auto_lock lockR( m_HeadLock ); + auto_lock lockW( m_TailLock ); + while ( m_pHead->m_pNext != null_ptr() ) { + node_type * pHead = m_pHead; + m_pHead = m_pHead->m_pNext; + free_node( pHead ); + } + } + + /// Returns queue's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the queue + is empty. To check queue emptyness use \ref empty() method. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_RWQUEUE_H diff --git a/cds/container/segmented_queue.h b/cds/container/segmented_queue.h new file mode 100644 index 00000000..45b894be --- /dev/null +++ b/cds/container/segmented_queue.h @@ -0,0 +1,409 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SEGMENTED_QUEUE_H +#define __CDS_CONTAINER_SEGMENTED_QUEUE_H + +#include +#include +#include +#include + +namespace cds { namespace container { + + /// SegmentedQueue -related declarations + namespace segmented_queue { + +# ifdef CDS_DOXYGEN_INVOKED + /// SegmentedQueue internal statistics + typedef cds::intrusive::segmented_queue::stat stat; +# else + using cds::intrusive::segmented_queue::stat; +# endif + + /// SegmentedQueue empty internal statistics (no overhead) + typedef cds::intrusive::segmented_queue::empty_stat empty_stat; + + /// SegmentedQueue default type traits + struct type_traits { + + /// Item allocator. 
Default is \ref CDS_DEFAULT_ALLOCATOR + typedef CDS_DEFAULT_ALLOCATOR node_allocator; + + /// Item counter, default is atomicity::item_counter + /** + The item counting is an essential part of segmented queue algorithm. + The \p empty() member function is based on checking size() == 0. + Therefore, dummy item counter like atomicity::empty_item_counter is not the proper counter. + */ + typedef atomicity::item_counter item_counter; + + /// Internal statistics, possible predefined types are \ref stat, \ref empty_stat (the default) + typedef segmented_queue::empty_stat stat; + + /// Memory model, default is opt::v::relaxed_ordering. See cds::opt::memory_model for the full list of possible types + typedef opt::v::relaxed_ordering memory_model; + + /// Alignment of critical data, default is cache line alignment. See cds::opt::alignment option specification + enum { alignment = opt::cache_line_alignment }; + + /// Segment allocator. Default is \ref CDS_DEFAULT_ALLOCATOR + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Lock type used to maintain an internal list of allocated segments + typedef cds::lock::Spin lock_type; + + /// Random \ref cds::opt::permutation_generator "permutation generator" for sequence [0, quasi_factor) + typedef cds::opt::v::random2_permutation permutation_generator; + }; + + /// Metafunction converting option list to traits for SegmentedQueue + /** + The metafunction can be useful if a few fields in \ref type_traits should be changed. + For example: + \code + typedef cds::container::segmented_queue::make_traits< + cds::opt::item_counter< cds::atomicity::item_counter > + >::type my_segmented_queue_traits; + \endcode + This code creates \p %SegmentedQueue type traits with item counting feature, + all other \p type_traits members left unchanged. + + \p Options are: + - \p opt::node_allocator - node allocator. + - \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default) + - \p opt::item_counter - item counting feature. Note that atomicity::empty_item_counetr is not suitable + for segmented queue. + - \p opt::memory_model - memory model, default is \p opt::v::relaxed_ordering. + See option description for the full list of possible models + - \p opt::alignment - the alignmentfor critical data, see option description for explanation + - \p opt::allocator - the allocator used to maintain segments. + - \p opt::lock_type - a mutual exclusion lock type used to maintain internal list of allocated + segments. Default is \p cds::opt::Spin, \p std::mutex is also suitable. 
+ - \p opt::permutation_generator - a random permutation generator for sequence [0, quasi_factor), + default is cds::opt::v::random2_permutation + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS9 >::type + ,CDS_OPTIONS9 + >::type type; +# endif + }; + + } // namespace segmented_queue + + //@cond + namespace details { + + template + struct make_segmented_queue + { + typedef GC gc; + typedef T value_type; + typedef Traits original_type_traits; + + typedef cds::details::Allocator< T, typename original_type_traits::node_allocator > cxx_node_allocator; + struct node_disposer { + void operator()( T * p ) + { + cxx_node_allocator().Delete( p ); + } + }; + + struct intrusive_type_traits: public original_type_traits { + typedef node_disposer disposer; + }; + + typedef cds::intrusive::SegmentedQueue< gc, value_type, intrusive_type_traits > type; + }; + + } // namespace details + //@endcond + + /// Segmented queue + /** @ingroup cds_nonintrusive_queue + + The queue is based on work + - [2010] Afek, Korland, Yanovsky "Quasi-Linearizability: relaxed consistency for improved concurrency" + + In this paper the authors offer a relaxed version of linearizability, so-called quasi-linearizability, + that preserves some of the intuition, provides a flexible way to control the level of relaxation + and supports th implementation of more concurrent and scalable data structure. + Intuitively, the linearizability requires each run to be equivalent in some sense to a serial run + of the algorithm. This equivalence to some serial run imposes strong synchronization requirements + that in many cases results in limited scalability and synchronization bottleneck. + + The general idea is that the queue maintains a linked list of segments, each segment is an array of + nodes in the size of the quasi factor, and each node has a deleted boolean marker, which states + if it has been dequeued. Each producer iterates over last segment in the linked list in some random + permutation order. Whet it finds an empty cell it performs a CAS operation attempting to enqueue its + new element. In case the entire segment has been scanned and no available cell is found (implying + that the segment is full), then it attempts to add a new segment to the list. + + The dequeue operation is similar: the consumer iterates over the first segment in the linked list + in some random permutation order. When it finds an item which has not yet been dequeued, it performs + CAS on its deleted marker in order to "delete" it, if succeeded this item is considered dequeued. + In case the entire segment was scanned and all the nodes have already been dequeued (implying that + the segment is empty), then it attempts to remove this segment from the linked list and starts + the same process on the next segment. If there is no next segment, the queue is considered empty. + + Based on the fact that most of the time threads do not add or remove segments, most of the work + is done in parallel on different cells in the segments. This ensures a controlled contention + depending on the segment size, which is quasi factor. + + The segmented queue is an unfair queue since it violates the strong FIFO order but no more than + quasi factor. It means that the consumer dequeues any item from the current first segment. 
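+
+        A minimal usage sketch follows; it is illustrative only: the value type, the quasi factor,
+        and the assumption that HP initialization and thread attaching are done elsewhere are
+        choices made for this example, not requirements stated in this header.
+        \code
+        #include <cds/gc/hp.h>
+        #include <cds/container/segmented_queue.h>
+
+        typedef cds::container::SegmentedQueue< cds::gc::HP, int > int_queue;
+
+        int_queue q( 16 );   // quasi factor 16: each segment contains 16 cells
+        q.push( 1 );         // the producer scans the last segment in random order for a free cell
+        int n;
+        if ( q.pop( n )) {   // the consumer scans the first segment for a not-yet-dequeued item
+            // n == 1
+        }
+        \endcode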
+ + Template parameters: + - \p GC - a garbage collector, possible types are cds::gc::HP, cds::gc::PTB + - \p T - the type of values stored in the queue + - \p Traits - queue type traits, default is segmented_queue::type_traits. + segmented_queue::make_traits metafunction can be used to construct your + type traits. + */ + template + class SegmentedQueue: +#ifdef CDS_DOXYGEN_INVOKED + public cds::intrusive::SegmentedQueue< GC, T, Traits > +#else + public details::make_segmented_queue< GC, T, Traits >::type +#endif + { + //@cond + typedef details::make_segmented_queue< GC, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef GC gc ; ///< Garbage collector + typedef T value_type ; ///< type of the value stored in the queue + typedef Traits options ; ///< Queue's traits + + typedef typename options::node_allocator node_allocator; ///< Node allocator + typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option + typedef typename base_class::item_counter item_counter; ///< Item counting policy, see cds::opt::item_counter option setter + typedef typename base_class::stat stat ; ///< Internal statistics policy + typedef typename base_class::lock_type lock_type ; ///< Type of mutex for maintaining an internal list of allocated segments. + typedef typename base_class::permutation_generator permutation_generator; ///< Random permutation generator for sequence [0, quasi-factor) + + static const size_t m_nHazardPtrCount = base_class::m_nHazardPtrCount ; ///< Count of hazard pointer required for the algorithm + + protected: + //@cond + typedef typename maker::cxx_node_allocator cxx_node_allocator; + typedef std::unique_ptr< value_type, typename maker::node_disposer > scoped_node_ptr; + + static value_type * alloc_node( value_type const& v ) + { + return cxx_node_allocator().New( v ); + } + + static value_type * alloc_node() + { + return cxx_node_allocator().New(); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + static value_type * alloc_node_move( Args&&... args ) + { + return cxx_node_allocator().MoveNew( std::forward( args )... ); + } +# endif + + struct dummy_disposer { + void operator()( value_type * p ) + {} + }; + //@endcond + + public: + /// Initializes the empty queue + SegmentedQueue( + size_t nQuasiFactor ///< Quasi factor. If it is not a power of 2 it is rounded up to nearest power of 2. Minimum is 2. + ) + : base_class( nQuasiFactor ) + {} + + /// Clears the queue and deletes all internal data + ~SegmentedQueue() + {} + + /// Inserts a new element at last segment of the queue + /** + The function makes queue node in dynamic memory calling copy constructor for \p val + and then it calls intrusive::SEgmentedQueue::enqueue. + Returns \p true if success, \p false otherwise. + */ + bool enqueue( value_type const& val ) + { + scoped_node_ptr p( alloc_node(val) ); + if ( base_class::enqueue( *p ) ) { + p.release(); + return true; + } + return false; + } + + /// Synonym for enqueue(value_type const&) function + bool push( value_type const& val ) + { + return enqueue( val ); + } + + /// Inserts a new element at last segment of the queue using copy functor + /** + \p Func is a functor called to copy value \p data of type \p Q + which may be differ from type \ref value_type stored in the queue. 
+ The functor's interface is: + \code + struct myFunctor { + void operator()(value_type& dest, Q const& data) + { + // // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + */ + template + bool enqueue( Q const& data, Func f ) + { + scoped_node_ptr p( alloc_node() ); + unref(f)( *p, data ); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Synonym for enqueue(Q const&, Func) function + template + bool push( Q const& data, Func f ) + { + return enqueue( data, f ); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Enqueues data of type \ref value_type constructed with std::forward(args)... + /** + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr p( alloc_node_move( std::forward(args)... ) ); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } +# endif + + /// Removes an element from first segment of the queue + /** + \p Func is a functor called to copy dequeued value to \p dest of type \p Q + which may be differ from type \ref value_type stored in the queue. + The functor's interface is: + \code + struct myFunctor { + void operator()(Q& dest, value_type const& data) + { + // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + */ + template + bool dequeue( Q& dest, Func f ) + { + value_type * p = base_class::dequeue(); + if ( p ) { + unref(f)( dest, *p ); + gc::template retire< typename maker::node_disposer >( p ); + return true; + } + return false; + } + + /// Synonym for dequeue( Q&, Func ) function + template + bool pop( Q& dest, Func f ) + { + return dequeue( dest, f ); + } + + /// Dequeues a value from the queue + /** + If queue is not empty, the function returns \p true, \p dest contains copy of + dequeued value. The assignment operator for type \ref value_type is invoked. + If queue is empty, the function returns \p false, \p dest is unchanged. + */ + bool dequeue( value_type& dest ) + { + typedef cds::details::trivial_assign functor; + return dequeue( dest, functor() ); + } + + /// Synonym for dequeue(value_type&) function + bool pop( value_type& dest ) + { + return dequeue( dest ); + } + + /// Checks if the queue is empty + /** + The original segmented queue algorithm does not allow to check emptiness accurately + because \p empty() is unlinearizable. + This function tests queue's emptiness checking size() == 0, + so, the item counting feature is an essential part of queue's algorithm. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Clear the queue + /** + The function repeatedly calls \ref dequeue until it returns \p nullptr. + The disposer specified in \p Traits template argument is called for each removed item. + */ + void clear() + { + base_class::clear(); + } + + /// Returns queue's item count + size_t size() const + { + return base_class::size(); + } + + /// Returns reference to internal statistics + /** + The type of internal statistics is specified by \p Traits template argument. 
+ */ + const stat& statistics() const + { + return base_class::statistics(); + } + + /// Returns quasi factor, a power-of-two number + size_t quasi_factor() const + { + return base_class::quasi_factor(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_SEGMENTED_QUEUE_H diff --git a/cds/container/skip_list_base.h b/cds/container/skip_list_base.h new file mode 100644 index 00000000..c52f0155 --- /dev/null +++ b/cds/container/skip_list_base.h @@ -0,0 +1,299 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SKIP_LIST_BASE_H +#define __CDS_CONTAINER_SKIP_LIST_BASE_H + +#include +#include + +namespace cds { namespace container { + + /// SkipListSet related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace skip_list { + +#ifdef CDS_DOXYGEN_INVOKED + /// Typedef for intrusive::skip_list::random_level_generator template + struct random_level_generator {}; +#else + using cds::intrusive::skip_list::random_level_generator; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + /// Typedef for intrusive::skip_list::xorshift class + class xorshift {}; +#else + using cds::intrusive::skip_list::xorshift; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + /// Typedef for intrusive::skip_list::turbo_pascal class + class turbo_pascal {}; +#else + using cds::intrusive::skip_list::turbo_pascal; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + /// Typedef for intrusive::skip_list::stat class + class stat {}; +#else + using cds::intrusive::skip_list::stat; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + /// Typedef for intrusive::skip_list::empty_stat class + class empty_stat {}; +#else + using cds::intrusive::skip_list::empty_stat; +#endif + + /// Type traits for SkipListSet class + struct type_traits + { + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// specifies binary predicate used for key compare. + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// Item counter + /** + The type for item counting feature. + Default is no item counter (\ref atomicity::empty_item_counter) + */ + typedef atomicity::empty_item_counter item_counter; + + /// C++ memory ordering model + /** + List of available memory ordering see opt::memory_model + */ + typedef opt::v::relaxed_ordering memory_model; + + /// Random level generator + /** + The random level generator is an important part of skip-list algorithm. + The node height in the skip-list have a probabilistic distribution + where half of the nodes that have level \p i also have level i+1 + (i = 0..30). The height of a node is in range [0..31]. + + See skip_list::random_level_generator option setter. + */ + typedef turbo_pascal random_level_generator; + + /// Allocator for skip-list nodes, \p std::allocator interface + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// back-off strategy used + /** + If the option is not specified, the cds::backoff::Default is used. 
+ */ + typedef cds::backoff::Default back_off; + + /// Internal statistics + typedef empty_stat stat; + + /// RCU deadlock checking policy (for \ref cds_nonintrusive_SkipListSet_rcu "RCU-based SkipListSet") + /** + List of available options see opt::rcu_check_deadlock + */ + typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; + + //@cond + // For internal use only + typedef opt::none key_accessor; + //@endcond + }; + + /// Metafunction converting option list to SkipListSet traits + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + \p Options list see \ref SkipListSet. + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS10 >::type + ,CDS_OPTIONS10 + >::type type; +# endif + }; + + //@cond + namespace details { + + template + class node_allocator + { + protected: + typedef Node node_type; + typedef Traits type_traits; + + typedef typename node_type::tower_item_type node_tower_item; + typedef typename type_traits::allocator::template rebind::other tower_allocator_type; + typedef typename type_traits::allocator::template rebind::other node_allocator_type; + + static size_t const c_nTowerItemSize = sizeof(node_tower_item); + static size_t const c_nNodePadding = sizeof(node_type) % c_nTowerItemSize; + static size_t const c_nNodeSize = sizeof(node_type) + (c_nNodePadding ? (c_nTowerItemSize - c_nNodePadding) : 0); + + static CDS_CONSTEXPR size_t node_size( unsigned int nHeight ) CDS_NOEXCEPT + { + return c_nNodeSize + (nHeight - 1) * c_nTowerItemSize; + } + static unsigned char * alloc_space( unsigned int nHeight ) + { + if ( nHeight > 1 ) { + unsigned char * pMem = tower_allocator_type().allocate( node_size(nHeight) ); + + // check proper alignments + assert( (((uintptr_t) pMem) & (alignof(node_type) - 1)) == 0 ); + assert( (((uintptr_t) (pMem + c_nNodeSize)) & (alignof(node_tower_item) - 1)) == 0 ); + return pMem; + } + + return reinterpret_cast( node_allocator_type().allocate(1)); + } + + static void free_space( unsigned char * p, unsigned int nHeight ) + { + assert( p != null_ptr() ); + if ( nHeight == 1 ) + node_allocator_type().deallocate( reinterpret_cast(p), 1 ); + else + tower_allocator_type().deallocate( p, node_size(nHeight)); + } + + public: + template + node_type * New( unsigned int nHeight, Q const& v ) + { + unsigned char * pMem = alloc_space( nHeight ); + return new( pMem ) + node_type( nHeight, nHeight > 1 ? reinterpret_cast( pMem + c_nNodeSize ) : null_ptr(), v ); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + node_type * New( unsigned int nHeight, Args&&... args ) + { + unsigned char * pMem = alloc_space( nHeight ); + return new( pMem ) + node_type( nHeight, nHeight > 1 ? reinterpret_cast( pMem + c_nNodeSize ): null_ptr(), + std::forward(args)... 
); + } +# endif + + void Delete( node_type * p ) + { + assert( p != null_ptr() ); + + unsigned int nHeight = p->height(); + node_allocator_type().destroy( p ); + free_space( reinterpret_cast(p), nHeight ); + } + }; + + template + struct dummy_node_builder { + typedef IntrusiveNode intrusive_node_type; + + template + static intrusive_node_type * make_tower( intrusive_node_type * pNode, RandomGen& /*gen*/ ) { return pNode ; } + static intrusive_node_type * make_tower( intrusive_node_type * pNode, unsigned int /*nHeight*/ ) { return pNode ; } + static void dispose_tower( intrusive_node_type * pNode ) + { + pNode->release_tower(); + } + + struct node_disposer { + void operator()( intrusive_node_type * pNode ) const {} + }; + }; + + template + class iterator + { + typedef ForwardIterator intrusive_iterator; + typedef typename intrusive_iterator::value_type node_type; + typedef typename node_type::stored_value_type value_type; + static bool const c_isConst = intrusive_iterator::c_isConst; + + typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref; + + intrusive_iterator m_It; + + public: // for internal use only!!! + iterator( intrusive_iterator const& it ) + : m_It( it ) + {} + + public: + iterator() + : m_It() + {} + + iterator( iterator const& s) + : m_It( s.m_It ) + {} + + value_type * operator ->() const + { + return &( m_It.operator->()->m_Value ); + } + + value_ref operator *() const + { + return m_It.operator*().m_Value; + } + + /// Pre-increment + iterator& operator ++() + { + ++m_It; + return *this; + } + + iterator& operator = (iterator const& src) + { + m_It = src.m_It; + return *this; + } + + template + bool operator ==(iterator const& i ) const + { + return m_It == i.m_It; + } + template + bool operator !=(iterator const& i ) const + { + return !( *this == i ); + } + }; + + } // namespace details + //@endcond + + } // namespace skip_list + + // Forward declaration + template + class SkipListSet; + + // Forward declaration + template + class SkipListMap; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_SKIP_LIST_BASE_H diff --git a/cds/container/skip_list_map_hp.h b/cds/container/skip_list_map_hp.h new file mode 100644 index 00000000..7665aa53 --- /dev/null +++ b/cds/container/skip_list_map_hp.h @@ -0,0 +1,11 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SKIP_LIST_MAP_HP_H +#define __CDS_CONTAINER_SKIP_LIST_MAP_HP_H + +#include +#include +#include +#include + +#endif // #ifndef __CDS_CONTAINER_SKIP_LIST_MAP_HP_H diff --git a/cds/container/skip_list_map_hrc.h b/cds/container/skip_list_map_hrc.h new file mode 100644 index 00000000..53d230e9 --- /dev/null +++ b/cds/container/skip_list_map_hrc.h @@ -0,0 +1,11 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SKIP_LIST_MAP_HRC_H +#define __CDS_CONTAINER_SKIP_LIST_MAP_HRC_H + +#include +#include +#include +#include + +#endif // #ifndef __CDS_CONTAINER_SKIP_LIST_MAP_HRC_H diff --git a/cds/container/skip_list_map_impl.h b/cds/container/skip_list_map_impl.h new file mode 100644 index 00000000..a1c83eda --- /dev/null +++ b/cds/container/skip_list_map_impl.h @@ -0,0 +1,803 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SKIP_LIST_MAP_IMPL_H +#define __CDS_CONTAINER_SKIP_LIST_MAP_IMPL_H + +#include +#include +#include + +namespace cds { namespace container { + + /// Lock-free skip-list map + /** @ingroup cds_nonintrusive_map + \anchor cds_nonintrusive_SkipListMap_hp + + The implementation of well-known probabilistic data structure called skip-list + invented by W.Pugh in his papers: 
+ - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees + - [1990] W.Pugh A Skip List Cookbook + + A skip-list is a probabilistic data structure that provides expected logarithmic + time search without the need of rebalance. The skip-list is a collection of sorted + linked list. Nodes are ordered by key. Each node is linked into a subset of the lists. + Each list has a level, ranging from 0 to 32. The bottom-level list contains + all the nodes, and each higher-level list is a sublist of the lower-level lists. + Each node is created with a random top level (with a random height), and belongs + to all lists up to that level. The probability that a node has the height 1 is 1/2. + The probability that a node has the height N is 1/2 ** N (more precisely, + the distribution depends on an random generator provided, but our generators + have this property). + + The lock-free variant of skip-list is implemented according to book + - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming", + chapter 14.4 "A Lock-Free Concurrent Skiplist" + + Template arguments: + - \p GC - Garbage collector used. + - \p K - type of a key to be stored in the list. + - \p T - type of a value to be stored in the list. + - \p Traits - type traits. See skip_list::type_traits for explanation. + + It is possible to declare option-based list with cds::container::skip_list::make_traits metafunction istead of \p Traits template + argument. + Template argument list \p Options of cds::container::skip_list::make_traits metafunction are: + - opt::compare - key compare functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - skip_list::random_level_generator - random level generator. Can be skip_list::xorshift, skip_list::turbo_pascal or + user-provided one. See skip_list::random_level_generator option description for explanation. + Default is \p %skip_list::turbo_pascal. + - opt::allocator - allocator for skip-list node. Default is \ref CDS_DEFAULT_ALLOCATOR. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::Default is used. + - opt::stat - internal statistics. Available types: skip_list::stat, skip_list::empty_stat (the default) + + Like STL map class, %SkipListMap stores its key-value pair as std:pair< K const, T>. + + \warning The skip-list requires up to 67 hazard pointers that may be critical for some GCs for which + the guard count is limited (like as gc::HP, gc::HRC). Those GCs should be explicitly initialized with + hazard pointer enough: \code cds::gc::HP myhp( 67 ) \endcode. Otherwise an run-time exception may be raised + when you try to create skip-list object. + + \note There are several specializations of \p %SkipListMap for each \p GC. You should include: + - for gc::HP garbage collector + - for gc::HRC garbage collector + - for gc::PTB garbage collector + - for \ref cds_nonintrusive_SkipListMap_rcu "RCU type" + - for \ref cds_nonintrusive_SkipListMap_nogc "non-deletable SkipListMap" + + Iterators + + The class supports a forward iterator (\ref iterator and \ref const_iterator). 
+ The iteration is ordered. + The iterator object is thread-safe: the element pointed by the iterator object is guarded, + so, the element cannot be reclaimed while the iterator object is alive. + However, passing an iterator object between threads is dangerous. + + \warning Due to concurrent nature of skip-list map it is not guarantee that you can iterate + all elements in the map: any concurrent deletion can exclude the element + pointed by the iterator from the map, and your iteration can be terminated + before end of the map. Therefore, such iteration is more suitable for debugging purpose only + + Remember, each iterator object requires 2 additional hazard pointers, that may be + a limited resource for \p GC like as gc::HP and gc::HRC (however, for gc::PTB the count of + guards is unlimited). + + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \ref end, \ cend member functions points to \p NULL and should not be dereferenced. + + */ + template < + typename GC, + typename Key, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = skip_list::type_traits +#else + typename Traits +#endif + > + class SkipListMap: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::SkipListSet< GC, std::pair, Traits > +#else + protected details::make_skip_list_map< GC, Key, T, Traits >::type +#endif + { + //@cond + typedef details::make_skip_list_map< GC, Key, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef Key key_type ; ///< Key type + typedef T mapped_type ; ///< Mapped type +# ifdef CDS_DOXYGEN_INVOKED + typedef std::pair< K const, T> value_type ; ///< Value type stored in the map +# else + typedef typename maker::value_type value_type; +# endif + typedef Traits options ; ///< Options specified + + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator allocator_type ; ///< Allocator type used for allocate/deallocate the skip-list nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator ; ///< key comparison functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename options::random_level_generator random_level_generator ; ///< random level generator + typedef typename options::stat stat ; ///< internal statistics type + + protected: + //@cond + typedef typename maker::node_type node_type; + typedef typename maker::node_allocator node_allocator; + + typedef std::unique_ptr< node_type, typename maker::node_deallocator > scoped_node_ptr; + + //@endcond + + public: + /// Guarded pointer + typedef cds::gc::guarded_ptr< gc, node_type, value_type, details::guarded_ptr_cast_set > guarded_ptr; + + protected: + //@cond + unsigned int random_level() + { + return base_class::random_level(); + } + //@endcond + + protected: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_insert_functor + { + void operator()( value_type& ) const + {} + }; + + template + class insert_value_functor + { + Q const& m_val; + public: + insert_value_functor( Q const & v) + : m_val(v) + {} + + void operator()( value_type& item ) + { + item.second = m_val; + } + }; + + template + class insert_key_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + insert_key_wrapper( Func f ): base_class(f) {} + + void operator()( node_type& item ) + { + base_class::get()( item.m_Value ); + } + }; + + template + class ensure_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + ensure_wrapper( Func f) : base_class(f) {} + + void operator()( bool bNew, node_type& item, node_type const& ) + { + base_class::get()( bNew, item.m_Value ); + } + }; + + template + struct erase_functor + { + Func m_func; + + erase_functor( Func f ) + : m_func(f) + {} + + void operator()( node_type& node ) + { + cds::unref(m_func)( node.m_Value ); + } + }; + + template + class find_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + find_wrapper( Func f ) + : base_class(f) + {} + + template + void operator()( node_type& item, Q& val ) + { + base_class::get()( item.m_Value, val ); + } + }; +# endif // #ifndef CDS_CXX11_LAMBDA_SUPPORT + //@endcond + + public: + /// Default ctor + SkipListMap() + : base_class() + {} + + /// Destructor destroys the set object + ~SkipListMap() + {} + + public: + /// Iterator type + typedef skip_list::details::iterator< typename base_class::iterator > iterator; + + /// Const iterator type + typedef skip_list::details::iterator< typename base_class::const_iterator > const_iterator; + + /// Returns a forward iterator addressing the first element in a map + iterator begin() + { + return iterator( base_class::begin() ); + } + + /// Returns a forward const iterator addressing the first element in a map + //@{ + const_iterator begin() const + { + return cbegin(); + } + const_iterator cbegin() + { + return const_iterator( base_class::cbegin() ); + } + //@} + + /// Returns a forward iterator that addresses the location succeeding the last element in a map. + iterator end() + { + return iterator( base_class::end() ); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a map. + //@{ + const_iterator end() const + { + return cend(); + } + const_iterator cend() + { + return const_iterator( base_class::cend() ); + } + //@} + + public: + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. 
+ + Preconditions: + - The \ref key_type should be constructible from a value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( K const& key ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return insert_key( key, [](value_type&){} ); +# else + return insert_key( key, empty_insert_functor() ); +# endif + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref value_type should be constructible from \p val of type \p V. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( K const& key, V const& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return insert_key( key, [&val](value_type& item) { item.second = val ; } ); +# else + insert_value_functor f(val); + return insert_key( key, cds::ref(f) ); +# endif + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + The user-defined functor can be passed by reference using boost::ref + and it is called only if inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + */ + template + bool insert_key( const K& key, Func func ) + { + scoped_node_ptr pNode( node_allocator().New( random_level(), key )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( base_class::insert( *pNode, [&func]( node_type& item ) { cds::unref(func)( item.m_Value ); } )) +# else + insert_key_wrapper wrapper(func); + if ( base_class::insert( *pNode, cds::ref(wrapper) )) +#endif + { + pNode.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// For key \p key inserts data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + @note This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( K&& key, Args&&... args ) + { + scoped_node_ptr pNode( node_allocator().New( random_level(), std::forward(key), std::forward(args)... )); + if ( base_class::insert( *pNode )) { + pNode.release(); + return true; + } + return false; + } +# endif + + + /// Ensures that the \p key exists in the map + /** + The operation performs inserting or changing data with lock-free manner. 
+ + If the \p key not found in the map, then the new item created from \p key + is inserted into the map (note that in this case the \ref key_type should be + constructible from type \p K). + Otherwise, the functor \p func is called with item found. + The functor \p Func may be a function with signature: + \code + void func( bool bNew, value_type& item ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + + The functor may change any fields of the \p item.second that is \ref value_type. + + You may pass \p func argument by reference using boost::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair ensure( K const& key, Func func ) + { + scoped_node_ptr pNode( node_allocator().New( random_level(), key )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair res = base_class::ensure( *pNode, + [&func](bool bNew, node_type& item, node_type const& ){ cds::unref(func)( bNew, item.m_Value ); } + ); +# else + ensure_wrapper wrapper( func ); + std::pair res = base_class::ensure( *pNode, cds::ref(wrapper) ); +# endif + if ( res.first && res.second ) + pNode.release(); + return res; + } + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_SkipListMap_erase_val + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return base_class::erase(key); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred ) + { + return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >()); + } + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_SkipListMap_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& item) { ... } + }; + \endcode + The functor may be passed by reference using boost:ref + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase( key, [&f]( node_type& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor wrapper(f); + return base_class::erase( key, cds::ref(wrapper)); +# endif + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. 
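+
+            A hedged illustration; \p map_type, the stored types and the functor names below are
+            assumptions made for this example only:
+            \code
+            // map_type is SkipListMap< cds::gc::HP, std::string, int, ... >;
+            // the map key is std::string, the search key is a plain C-string
+            struct my_less {
+                bool operator()( std::string const& lhs, char const* rhs ) const
+                { return lhs.compare( rhs ) < 0; }
+                bool operator()( char const* lhs, std::string const& rhs ) const
+                { return rhs.compare( lhs ) > 0; }
+            };
+            struct my_eraser {
+                void operator()( map_type::value_type& item )
+                {
+                    // item.second may be saved here before the node is removed
+                }
+            };
+
+            map_type m;
+            // ...
+            m.erase_with( "eleven", my_less(), my_eraser() );
+            \endcode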
+ */ + template + bool erase_with( K const& key, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase_with( key, + cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >(), + [&f]( node_type& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor wrapper(f); + return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >(), cds::ref(wrapper)); +# endif + } + + /// Extracts the item from the map with specified \p key + /** \anchor cds_nonintrusive_SkipListMap_hp_extract + The function searches an item with key equal to \p key in the map, + unlinks it from the map, and returns it in \p ptr parameter. + If the item with key equal to \p key is not found the function returns \p false. + + Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type. + + The item extracted is freed automatically by garbage collector \p GC + when returned \ref guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::SkipListMap< cds::gc::HP, int, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp; + if ( theList.extract( gp, 5 ) ) { + // Deal with gp + // ... + } + // Destructor of gp releases internal HP guard and frees the pointer + } + \endcode + */ + template + bool extract( guarded_ptr& ptr, K const& key ) + { + return base_class::extract_( ptr.guard(), key, typename base_class::key_comparator() ); + } + + /// Extracts the item from the map with comparing functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_hp_extract "extract(K const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + bool extract_with( guarded_ptr& ptr, K const& key, Less pred ) + { + typedef cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor > wrapped_less; + return base_class::extract_( ptr.guard(), key, cds::opt::details::make_comparator_from_less() ); + } + + /// Extracts an item with minimal key from the map + /** + The function searches an item with minimal key, unlinks it, and returns the item found in \p ptr parameter. + If the skip-list is empty the function returns \p false. + + The item extracted is freed automatically by garbage collector \p GC + when returned \ref guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::continer::SkipListMap< cds::gc::HP, int, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp; + if ( theList.extract_min( gp )) { + // Deal with gp + //... + } + // Destructor of gp releases internal HP guard and then frees the pointer + } + \endcode + */ + bool extract_min( guarded_ptr& ptr) + { + return base_class::extract_min_( ptr.guard() ); + } + + /// Extracts an item with maximal key from the map + /** + The function searches an item with maximal key, unlinks it, and returns the pointer to item found in \p ptr parameter. + If the skip-list is empty the function returns empty \p guarded_ptr. 
+ + The item found is freed by garbage collector \p GC automatically + when returned \ref guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::SkipListMap< cds::gc::HP, int, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp; + if ( theList.extract_max( gp )) { + // Deal with gp + //... + } + // Destructor of gp releases internal HP guard and then frees the pointer + } + \endcode + */ + bool extract_max( guarded_ptr& dest ) + { + return base_class::extract_max_( dest.guard() ); + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_SkipListMap_find_cfunc + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + You can pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change \p item.second. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( key, [&f](node_type& item, K const& ) { cds::unref(f)( item.m_Value );}); +# else + find_wrapper wrapper(f); + return base_class::find( key, cds::ref(wrapper) ); +# endif + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_find_cfunc "find(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( key, + cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >(), + [&f](node_type& item, K const& ) { cds::unref(f)( item.m_Value );}); +# else + find_wrapper wrapper(f); + return base_class::find_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >(), cds::ref(wrapper) ); +# endif + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_SkipListMap_find_val + + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool find( K const& key ) + { + return base_class::find( key ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_find_val "find(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred ) + { + return base_class::find_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >() ); + } + + /// Finds the key \p key and return the item found + /** \anchor cds_nonintrusive_SkipListMap_hp_get + The function searches the item with key equal to \p key + and assigns the item found to guarded pointer \p ptr. + The function returns \p true if \p key is found, and \p false otherwise. + If \p key is not found the \p ptr parameter is not changed. 
+ + It is safe when a concurrent thread erases the item returned in \p ptr guarded pointer. + In this case the item will be freed later by garbage collector \p GC automatically + when \p guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::SkipListMap< cds::gc::HP, int, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp; + if ( theList.get( gp, 5 ) ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p K that can be not the same as \p value_type. + */ + template + bool get( guarded_ptr& ptr, K const& key ) + { + return base_class::get_with_( ptr.guard(), key, typename base_class::key_comparator() ); + } + + /// Finds the key \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_hp_get "get( guarded_ptr& ptr, K const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + bool get_with( guarded_ptr& ptr, K const& key, Less pred ) + { + typedef cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor > wrapped_less; + return base_class::get_with_( ptr.guard(), key, cds::opt::details::make_comparator_from_less< wrapped_less >()); + } + + /// Clears the map + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + }; +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_SKIP_LIST_MAP_IMPL_H diff --git a/cds/container/skip_list_map_nogc.h b/cds/container/skip_list_map_nogc.h new file mode 100644 index 00000000..cb402ebe --- /dev/null +++ b/cds/container/skip_list_map_nogc.h @@ -0,0 +1,386 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SKIP_LIST_MAP_NOGC_H +#define __CDS_CONTAINER_SKIP_LIST_MAP_NOGC_H + +#include + +namespace cds { namespace container { + //@cond + namespace skip_list { namespace details { + struct map_key_accessor + { + template + typename NodeType::stored_value_type::first_type const& operator()( NodeType const& node ) const + { + return node.m_Value.first; + } + }; + }} // namespace skip_list::details + //@endcond + + + /// Lock-free skip-list map (template specialization for gc::nogc) + /** @ingroup cds_nonintrusive_map + \anchor cds_nonintrusive_SkipListMap_nogc + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of map item. + See \ref cds_nonintrusive_SkipListMap_hp "SkipListMap" for detailed description. + + Template arguments: + - \p K - type of a key to be stored in the list. + - \p T - type of a value to be stored in the list. + - \p Traits - type traits. See skip_list::type_traits for explanation. 
+ + It is possible to declare option-based list with cds::container::skip_list::make_traits metafunction istead of \p Traits template + argument. + Template argument list \p Options of cds::container::skip_list::make_traits metafunction are: + - opt::compare - key compare functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - skip_list::random_level_generator - random level generator. Can be skip_list::xorshift, skip_list::turbo_pascal or + user-provided one. See skip_list::random_level_generator option description for explanation. + Default is \p %skip_list::turbo_pascal. + - opt::allocator - allocator for skip-list node. Default is \ref CDS_DEFAULT_ALLOCATOR. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::Default is used. + - opt::stat - internal statistics. Available types: skip_list::stat, skip_list::empty_stat (the default) + */ + template < + typename Key, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = skip_list::type_traits +#else + typename Traits +#endif + > + class SkipListMap< cds::gc::nogc, Key, T, Traits >: +#ifdef CDS_DOXYGEN_INVOKED + protected SkipListSet< cds::gc::nogc, std::pair< Key const, T >, Traits > +#else + protected SkipListSet< + cds::gc::nogc + ,std::pair< Key const, T > + ,typename cds::opt::replace_key_accessor< Traits, skip_list::details::map_key_accessor >::type + > +#endif + { + //@cond + typedef SkipListSet< + cds::gc::nogc + ,std::pair< Key const, T > + ,typename cds::opt::replace_key_accessor< Traits, skip_list::details::map_key_accessor >::type + > base_class; + //@endcond + + public: + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef Key key_type ; ///< Key type + typedef T mapped_type ; ///< Mapped type + typedef std::pair< key_type const, mapped_type> value_type ; ///< Key-value pair stored in the map + typedef Traits options ; ///< Options specified + + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename base_class::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the skip-list nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename base_class::key_comparator key_comparator ; ///< key compare functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename base_class::stat stat ; ///< internal statistics type + typedef typename base_class::random_level_generator random_level_generator ; ///< random level generator + + protected: + //@cond + typedef typename base_class::node_type node_type; + typedef typename base_class::node_allocator node_allocator; + + /* + template + struct less_wrapper { + typedef Less less_op; + + bool operator()( value_type const& v1, value_type const& v2 ) const + { + return less_op()( v1.first, v2.first); + } + + template + bool operator()( value_type const& v1, Q const& v2 ) const + { + return less_op()( v1.first, v2 ); + } + + template + bool operator()( Q const& v1, value_type const& v2 ) const + { + return less_op()( v1, v2.first ); + } + }; + */ + //@endcond + + public: + /// Default constructor + SkipListMap() + : base_class() + {} + + /// Destructor clears the map + ~SkipListMap() + {} + + public: + /// Forward iterator + /** + Remember, the iterator operator -> and operator * returns \ref value_type pointer and reference. + To access item key and value use it->first and it->second respectively. + */ + typedef typename base_class::iterator iterator; + + /// Const forward iterator + typedef typename base_class::const_iterator const_iterator; + + /// Returns a forward iterator addressing the first element in a map + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return base_class::begin(); + } + + /// Returns an iterator that addresses the location succeeding the last element in a map + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. + For empty set \code begin() == end() \endcode + */ + iterator end() + { + return base_class::end(); + } + + /// Returns a forward const iterator addressing the first element in a map + //@{ + const_iterator begin() const + { + return base_class::begin(); + } + const_iterator cbegin() + { + return base_class::cbegin(); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a map + //@{ + const_iterator end() const + { + return base_class::end(); + } + const_iterator cend() + { + return base_class::cend(); + } + //@} + + public: + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( K const& key ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::insert( std::make_pair( key, mapped_type() ) ); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. 
+ + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( K const& key, V const& val ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::insert( std::make_pair( key, val ) ); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted. item.second is a reference to item's value that may be changed. + User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this map's item by concurrent threads. + The user-defined functor can be passed by reference using boost::ref + and it is called only if the inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p f functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert_key( K const& key, Func func ) + { + iterator it = insert( key ); + if ( it != end() ) + cds::unref( func )( (*it) ); + return it; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// For key \p key inserts data of type \ref mapped_type constructed with std::forward(args)... + /** + \p key_type should be constructible from type \p K + + Returns \p true if inserting successful, \p false otherwise. + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + iterator emplace( K&& key, Args&&... args ) + { + return base_class::emplace( std::forward(key), std::move(mapped_type(std::forward(args)...))); + } +# endif + + /// Ensures that the key \p key exists in the map + /** + The operation inserts new item if the key \p key is not found in the map. + Otherwise, the function returns an iterator that points to item found. + + Returns std::pair where \p first is an iterator pointing to + item found or inserted, \p second is true if new item has been added or \p false if the item + already is in the list. + */ + template + std::pair ensure( K const& key ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::ensure( std::make_pair( key, mapped_type() )); + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_SkipListMap_nogc_find_val + + The function searches the item with key equal to \p key + and returns an iterator pointed to item found if the key is found, + and \ref end() otherwise + */ + template + iterator find( K const& key ) + { + return base_class::find( key ); + } + + /// Finds the key \p key with comparing functor \p cmp + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_nogc_find_val "find(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
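+
+            Unlike the \p gc::HP specialization, this function returns an iterator rather than \p bool.
+            A hedged sketch; \p map_type and the stored types are assumptions for illustration only:
+            \code
+            // map_type is SkipListMap< cds::gc::nogc, int, std::string, ... > ordered by the default std::less
+            map_type::iterator it = m.find_with( 42, std::less<int>() );
+            if ( it != m.end() )
+                it->second = "forty two";   // the item found may be changed in place
+            \endcode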
+ */ + template + iterator find_with( K const& key, Less pred ) const + { + return base_class::find_with( key, pred ); + } + + /// Gets minimum key from the map + /** + If the map is empty the function returns \p NULL + */ + value_type * get_min() const + { + return base_class::get_min(); + } + + /// Gets maximum key from the map + /** + The function returns \p NULL if the map is empty + */ + value_type * get_max() const + { + return base_class::get_max(); + } + + /// Clears the map (non-atomic) + /** + The function is not atomic. + Finding and/or inserting is prohibited while clearing. + Otherwise an unpredictable result may be encountered. + Thus, \p clear() may be used only for debugging purposes. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + Thus, the correct item counting feature is an important part of Michael's map implementation. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + + /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32. + static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT + { + return base_class::max_height(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + }; + +}} // namespace cds::container + + +#endif // #ifndef __CDS_CONTAINER_SKIP_LIST_MAP_NOGC_H diff --git a/cds/container/skip_list_map_ptb.h b/cds/container/skip_list_map_ptb.h new file mode 100644 index 00000000..60cda768 --- /dev/null +++ b/cds/container/skip_list_map_ptb.h @@ -0,0 +1,11 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SKIP_LIST_SET_PTB_H +#define __CDS_CONTAINER_SKIP_LIST_SET_PTB_H + +#include +#include +#include +#include + +#endif // #ifndef __CDS_CONTAINER_SKIP_LIST_SET_PTB_H diff --git a/cds/container/skip_list_map_rcu.h b/cds/container/skip_list_map_rcu.h new file mode 100644 index 00000000..f15309ae --- /dev/null +++ b/cds/container/skip_list_map_rcu.h @@ -0,0 +1,807 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SKIP_LIST_MAP_RCU_H +#define __CDS_CONTAINER_SKIP_LIST_MAP_RCU_H + +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Lock-free skip-list map (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_nonintrusive_map + \anchor cds_nonintrusive_SkipListMap_rcu + + The implementation of well-known probabilistic data structure called skip-list + invented by W.Pugh in his papers: + - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees + - [1990] W.Pugh A Skip List Cookbook + + A skip-list is a probabilistic data structure that provides expected logarithmic + time search without the need of rebalance. The skip-list is a collection of sorted + linked list. Nodes are ordered by key. Each node is linked into a subset of the lists. + Each list has a level, ranging from 0 to 32. The bottom-level list contains + all the nodes, and each higher-level list is a sublist of the lower-level lists. + Each node is created with a random top level (with a random height), and belongs + to all lists up to that level. The probability that a node has the height 1 is 1/2. 
+ The probability that a node has the height N is 1/2 ** N (more precisely, + the distribution depends on an random generator provided, but our generators + have this property). + + The lock-free variant of skip-list is implemented according to book + - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming", + chapter 14.4 "A Lock-Free Concurrent Skiplist" + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type". + - \p K - type of a key to be stored in the list. + - \p T - type of a value to be stored in the list. + - \p Traits - type traits. See skip_list::type_traits for explanation. + + It is possible to declare option-based list with cds::container::skip_list::make_traits metafunction istead of \p Traits template + argument. + Template argument list \p Options of cds::container::skip_list::make_traits metafunction are: + - opt::compare - key compare functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - skip_list::random_level_generator - random level generator. Can be skip_list::xorshift, skip_list::turbo_pascal or + user-provided one. See skip_list::random_level_generator option description for explanation. + Default is \p %skip_list::turbo_pascal. + - opt::allocator - allocator for skip-list node. Default is \ref CDS_DEFAULT_ALLOCATOR. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::Default is used. + - opt::stat - internal statistics. Available types: skip_list::stat, skip_list::empty_stat (the default) + - opt::rcu_check_deadlock - a deadlock checking policy. Default is opt::v::rcu_throw_deadlock + + Like STL map class, \p %SkipListMap stores its key-value pair as std:pair< K const, T>. + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + + Iterators + + The class supports a forward iterator (\ref iterator and \ref const_iterator). + The iteration is ordered. + You may iterate over skip-list set items only under RCU lock. + Only in this case the iterator is thread-safe since + while RCU is locked any set's item cannot be reclaimed. + + The requirement of RCU lock during iterating means that deletion of the elements (i.e. \ref erase) + is not possible. + + @warning The iterator object cannot be passed between threads + + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced. 
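+
+        A minimal iteration sketch (for illustration only; the map type below is an assumption):
+        \code
+        // The appropriate RCU header must be included first, see the note above
+        typedef cds::container::SkipListMap< cds::urcu::gc< cds::urcu::general_buffered<> >, int, std::string > rcu_map;
+        rcu_map theMap;
+        // ... fill the map ...
+        {
+            // Iteration is allowed only under RCU lock
+            rcu_map::rcu_lock lock;
+            for ( rcu_map::iterator it = theMap.begin(); it != theMap.end(); ++it ) {
+                // it->first is the key, it->second is the mapped value
+            }
+        }
+        \endcode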
+ + */ + template < + typename RCU, + typename Key, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = skip_list::type_traits +#else + typename Traits +#endif + > + class SkipListMap< cds::urcu::gc< RCU >, Key, T, Traits >: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::SkipListSet< cds::urcu::gc< RCU >, std::pair, Traits > +#else + protected details::make_skip_list_map< cds::urcu::gc< RCU >, Key, T, Traits >::type +#endif + { + //@cond + typedef details::make_skip_list_map< cds::urcu::gc< RCU >, Key, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef Key key_type ; ///< Key type + typedef T mapped_type ; ///< Mapped type +# ifdef CDS_DOXYGEN_INVOKED + typedef std::pair< K const, T> value_type ; ///< Value type stored in the map +# else + typedef typename maker::value_type value_type; +# endif + typedef Traits options ; ///< Options specified + + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator allocator_type ; ///< Allocator type used for allocate/deallocate the skip-list nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator ; ///< key comparison functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + typedef typename options::random_level_generator random_level_generator ; ///< random level generator + typedef typename options::stat stat ; ///< internal statistics type + + protected: + //@cond + typedef typename maker::node_type node_type; + typedef typename maker::node_allocator node_allocator; + + typedef std::unique_ptr< node_type, typename maker::node_deallocator > scoped_node_ptr; + //@endcond + + public: + typedef typename base_class::rcu_lock rcu_lock; ///< RCU scoped lock + /// Group of \p extract_xxx functions do not require external locking + static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; + + /// pointer to extracted node + typedef cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::intrusive_type_traits::disposer > exempt_ptr; + + protected: + //@cond + unsigned int random_level() + { + return base_class::random_level(); + } + + value_type * to_value_ptr( node_type * pNode ) const CDS_NOEXCEPT + { + return pNode ? 
&pNode->m_Value : null_ptr(); + } + //@endcond + + protected: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_insert_functor + { + void operator()( value_type& ) const + {} + }; + + template + class insert_value_functor + { + Q const& m_val; + public: + insert_value_functor( Q const& v) + : m_val(v) + {} + + void operator()( value_type& item ) + { + item.second = m_val; + } + }; + + template + class insert_key_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + insert_key_wrapper( Func f ): base_class(f) {} + + void operator()( node_type& item ) + { + base_class::get()( item.m_Value ); + } + }; + + template + class ensure_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + ensure_wrapper( Func f) : base_class(f) {} + + void operator()( bool bNew, node_type& item, node_type const& ) + { + base_class::get()( bNew, item.m_Value ); + } + }; + + template + struct erase_functor + { + Func m_func; + + erase_functor( Func f ) + : m_func(f) + {} + + void operator()( node_type& node ) + { + cds::unref(m_func)( node.m_Value ); + } + }; + + template + class find_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + find_wrapper( Func f ) + : base_class(f) + {} + + template + void operator()( node_type& item, Q& val ) + { + base_class::get()( item.m_Value, val ); + } + }; + + template + struct extract_copy_wrapper + { + Func m_func; + extract_copy_wrapper( Func f ) + : m_func(f) + {} + + template + void operator()( Q& dest, node_type& src ) + { + cds::unref(m_func)(dest, src.m_Value); + } + }; + +# endif // #ifndef CDS_CXX11_LAMBDA_SUPPORT + //@endcond + + public: + /// Default ctor + SkipListMap() + : base_class() + {} + + /// Destructor destroys the set object + ~SkipListMap() + {} + + public: + /// Iterator type + typedef skip_list::details::iterator< typename base_class::iterator > iterator; + + /// Const iterator type + typedef skip_list::details::iterator< typename base_class::const_iterator > const_iterator; + + /// Returns a forward iterator addressing the first element in a map + iterator begin() + { + return iterator( base_class::begin() ); + } + + /// Returns a forward const iterator addressing the first element in a map + //@{ + const_iterator begin() const + { + return cbegin(); + } + const_iterator cbegin() + { + return const_iterator( base_class::cbegin() ); + } + //@} + + /// Returns a forward iterator that addresses the location succeeding the last element in a map. + iterator end() + { + return iterator( base_class::end() ); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a map. + //@{ + const_iterator end() const + { + return cend(); + } + const_iterator cend() + { + return const_iterator( base_class::cend() ); + } + //@} + + public: + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from a value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns \p true if inserting successful, \p false otherwise. 
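+
+        For example (a sketch; the map type is an assumption):
+        \code
+        typedef cds::container::SkipListMap< cds::urcu::gc< cds::urcu::general_buffered<> >, int, std::string > rcu_map;
+        rcu_map theMap;
+        // RCU must not be locked here
+        theMap.insert( 10 );    // key 10 with default-constructed (empty) string value
+        \endcode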
+ */ + template + bool insert( K const& key ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return insert_key( key, [](value_type&){} ); +# else + return insert_key( key, empty_insert_functor() ); +# endif + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref value_type should be constructible from \p val of type \p V. + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( K const& key, V const& val ) + { + /* +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return insert_key( key, [&val](value_type& item) { item.second = val ; } ); +# else + insert_value_functor f(val); + return insert_key( key, cds::ref(f) ); +# endif + */ + scoped_node_ptr pNode( node_allocator().New( random_level(), key, val )); + if ( base_class::insert( *pNode )) + { + pNode.release(); + return true; + } + return false; + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + The user-defined functor can be passed by reference using boost::ref + and it is called only if inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool insert_key( const K& key, Func func ) + { + scoped_node_ptr pNode( node_allocator().New( random_level(), key )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( base_class::insert( *pNode, [&func]( node_type& item ) { cds::unref(func)( item.m_Value ); } )) +# else + insert_key_wrapper wrapper(func); + if ( base_class::insert( *pNode, cds::ref(wrapper) )) +#endif + { + pNode.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// For key \p key inserts data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + RCU \p synchronize method can be called. RCU should not be locked. + + @note This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( K&& key, Args&&... args ) + { + scoped_node_ptr pNode( node_allocator().New( random_level(), std::forward(key), std::forward(args)... )); + if ( base_class::insert( *pNode )) { + pNode.release(); + return true; + } + return false; + } +# endif + + + /// Ensures that the \p key exists in the map + /** + The operation performs inserting or changing data with lock-free manner. 
+ + If the \p key not found in the map, then the new item created from \p key + is inserted into the map (note that in this case the \ref key_type should be + constructible from type \p K). + Otherwise, the functor \p func is called with item found. + The functor \p Func may be a function with signature: + \code + void func( bool bNew, value_type& item ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + + The functor may change any fields of the \p item.second that is \ref value_type. + + You may pass \p func argument by reference using boost::ref. + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair ensure( K const& key, Func func ) + { + scoped_node_ptr pNode( node_allocator().New( random_level(), key )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair res = base_class::ensure( *pNode, + [&func](bool bNew, node_type& item, node_type const& ){ cds::unref(func)( bNew, item.m_Value ); } + ); +# else + ensure_wrapper wrapper( func ); + std::pair res = base_class::ensure( *pNode, cds::ref(wrapper) ); +# endif + if ( res.first && res.second ) + pNode.release(); + return res; + } + + /// Delete \p key from the map + /**\anchor cds_nonintrusive_SkipListMap_rcu_erase_val + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return base_class::erase(key); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_rcu_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred ) + { + return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >()); + } + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_SkipListMap_rcu_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& item) { ... } + }; + \endcode + The functor may be passed by reference using boost:ref + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase( key, [&f]( node_type& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor wrapper(f); + return base_class::erase( key, cds::ref(wrapper)); +# endif + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_rcu_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. 
+ \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >(), + [&f]( node_type& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor wrapper(f); + return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >(), cds::ref(wrapper)); +# endif + } + + /// Extracts the item from the map with specified \p key + /** \anchor cds_nonintrusive_SkipListMap_rcu_extract + The function searches an item with key equal to \p key in the map, + unlinks it from the set, and returns it in \p result parameter. + If the item with key equal to \p key is not found the function returns \p false. + + Note the compare functor from \p Traits class' template argument + should accept a parameter of type \p K that can be not the same as \p key_type. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item found. + The item will be implicitly freed when \p result object is destroyed or when + result.release() is called, see cds::urcu::exempt_ptr for explanation. + @note Before reusing \p result object you should call its \p release() method. + */ + template + bool extract( exempt_ptr& result, K const& key ) + { + return base_class::do_extract( result, key ); + } + + /// Extracts the item from the map with comparing functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_rcu_extract "extract(exempt_ptr&, K const&)" + but \p pred predicate is used for key comparing. + \p Less has the semantics like \p std::less. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + bool extract_with( exempt_ptr& result, K const& key, Less pred ) + { + return base_class::do_extract_with( result, key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >()); + } + + /// Extracts an item with minimal key from the map + /** + The function searches an item with minimal key, unlinks it, and returns the item found in \p result parameter. + If the skip-list is empty the function returns \p false. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item found. + The item will be implicitly freed when \p result object is destroyed or when + result.release() is called, see cds::urcu::exempt_ptr for explanation. + @note Before reusing \p result object you should call its \p release() method. + */ + bool extract_min( exempt_ptr& result ) + { + return base_class::do_extract_min(result); + } + + /// Extracts an item with maximal key from the map + /** + The function searches an item with maximal key, unlinks it from the set, and returns the item found + in \p result parameter. If the skip-list is empty the function returns \p false. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item found. + The item will be implicitly freed when \p result object is destroyed or when + result.release() is called, see cds::urcu::exempt_ptr for explanation. + @note Before reusing \p result object you should call its \p release() method. 
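+
+        Usage sketch (for illustration only; the map type is an assumption):
+        \code
+        typedef cds::container::SkipListMap< cds::urcu::gc< cds::urcu::general_buffered<> >, int, std::string > rcu_map;
+        rcu_map theMap;
+        // ...
+        {
+            rcu_map::exempt_ptr ep;
+            // RCU must NOT be locked here
+            if ( theMap.extract_max( ep )) {
+                // Deal with the extracted item through ep
+                // ...
+            }
+            // When ep is destroyed the item is passed to the RCU reclamation cycle
+        }
+        \endcode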
+ */ + bool extract_max( exempt_ptr& result ) + { + return base_class::do_extract_max(result); + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_SkipListMap_rcu_find_cfunc + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + You can pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change \p item.second. + + The function applies RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( key, [&f](node_type& item, K const& ) { cds::unref(f)( item.m_Value );}); +# else + find_wrapper wrapper(f); + return base_class::find( key, cds::ref(wrapper) ); +# endif + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_rcu_find_cfunc "find(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >(), + [&f](node_type& item, K const& ) { cds::unref(f)( item.m_Value );}); +# else + find_wrapper wrapper(f); + return base_class::find_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >(), cds::ref(wrapper) ); +# endif + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_SkipListMap_rcu_find_val + + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool find( K const& key ) + { + return base_class::find( key ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_rcu_find_val "find(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred ) + { + return base_class::find_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >() ); + } + + /// Finds the key \p key and return the item found + /** \anchor cds_nonintrusive_SkipListMap_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns \p NULL. + + Note the compare functor in \p Traits class' template argument + should accept a parameter of type \p K that can be not the same as \p key_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::container::SkipListMap< cds::urcu::gc< cds::urcu::general_buffered<> >, int, foo, my_traits > skip_list; + skip_list theList; + // ... + { + // Lock RCU + skip_list::rcu_lock lock; + + skip_list::value_type * pVal = theList.get( 5 ); + // Deal with pVal + //... 
+ + // Unlock RCU by rcu_lock destructor + // pVal can be freed at any time after RCU unlocking + } + \endcode + + After RCU unlocking the \p %force_dispose member function can be called manually, + see \ref force_dispose for explanation. + */ + template + value_type * get( K const& key ) + { + return to_value_ptr( base_class::get( key )); + } + + /// Finds the key \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_rcu_get "get(K const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + value_type * get_with( K const& key, Less pred ) + { + return to_value_ptr( base_class::get_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >() )); + } + + /// Clears the map + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Clears internal list of ready-to-delete items passing them to RCU reclamation cycle + /** + See \ref cds_intrusive_SkipListSet_rcu_force_dispose "intrusive SkipListSet" for explanation + */ + void force_dispose() + { + return base_class::force_dispose(); + } + }; +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_SKIP_LIST_MAP_RCU_H diff --git a/cds/container/skip_list_set_hp.h b/cds/container/skip_list_set_hp.h new file mode 100644 index 00000000..4cd3cd0c --- /dev/null +++ b/cds/container/skip_list_set_hp.h @@ -0,0 +1,11 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SKIP_LIST_SET_HP_H +#define __CDS_CONTAINER_SKIP_LIST_SET_HP_H + +#include +#include +#include +#include + +#endif // #ifndef __CDS_CONTAINER_SKIP_LIST_SET_HP_H diff --git a/cds/container/skip_list_set_hrc.h b/cds/container/skip_list_set_hrc.h new file mode 100644 index 00000000..4b40362d --- /dev/null +++ b/cds/container/skip_list_set_hrc.h @@ -0,0 +1,11 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SKIP_LIST_SET_HRC_H +#define __CDS_CONTAINER_SKIP_LIST_SET_HRC_H + +#include +#include +#include +#include + +#endif // #ifndef __CDS_CONTAINER_SKIP_LIST_SET_HRC_H diff --git a/cds/container/skip_list_set_impl.h b/cds/container/skip_list_set_impl.h new file mode 100644 index 00000000..70abe853 --- /dev/null +++ b/cds/container/skip_list_set_impl.h @@ -0,0 +1,843 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SKIP_LIST_SET_IMPL_H +#define __CDS_CONTAINER_SKIP_LIST_SET_IMPL_H + +#include +#include +#include + +namespace cds { namespace container { + + /// Lock-free skip-list set + /** @ingroup cds_nonintrusive_set + \anchor cds_nonintrusive_SkipListSet_hp + + The implementation of well-known probabilistic data structure called skip-list + invented by W.Pugh in his papers: + - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees + - [1990] W.Pugh A Skip List Cookbook + + A skip-list is a probabilistic data structure that provides expected logarithmic + time search without the need of rebalance. The skip-list is a collection of sorted + linked list. 
Nodes are ordered by key. Each node is linked into a subset of the lists. + Each list has a level, ranging from 0 to 32. The bottom-level list contains + all the nodes, and each higher-level list is a sublist of the lower-level lists. + Each node is created with a random top level (with a random height), and belongs + to all lists up to that level. The probability that a node has the height 1 is 1/2. + The probability that a node has the height N is 1/2 ** N (more precisely, + the distribution depends on an random generator provided, but our generators + have this property). + + The lock-free variant of skip-list is implemented according to book + - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming", + chapter 14.4 "A Lock-Free Concurrent Skiplist" + + Template arguments: + - \p GC - Garbage collector used. + - \p T - type to be stored in the list. + - \p Traits - type traits. See skip_list::type_traits for explanation. + + It is possible to declare option-based list with cds::container::skip_list::make_traits metafunction istead of \p Traits template + argument. + Template argument list \p Options of cds::container::skip_list::make_traits metafunction are: + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - skip_list::random_level_generator - random level generator. Can be skip_list::xorshift, skip_list::turbo_pascal or + user-provided one. See skip_list::random_level_generator option description for explanation. + Default is \p %skip_list::turbo_pascal. + - opt::allocator - allocator for skip-list node. Default is \ref CDS_DEFAULT_ALLOCATOR. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::Default is used. + - opt::stat - internal statistics. Available types: skip_list::stat, skip_list::empty_stat (the default) + + \warning The skip-list requires up to 67 hazard pointers that may be critical for some GCs for which + the guard count is limited (like as gc::HP, gc::HRC). Those GCs should be explicitly initialized with + hazard pointer enough: \code cds::gc::HP myhp( 67 ) \endcode. Otherwise an run-time exception may be raised + when you try to create skip-list object. + + \note There are several specializations of \p %SkipListSet for each \p GC. You should include: + - for gc::HP garbage collector + - for gc::HRC garbage collector + - for gc::PTB garbage collector + - for \ref cds_nonintrusive_SkipListSet_rcu "RCU type" + - for \ref cds_nonintrusive_SkipListSet_nogc "non-deletable SkipListSet" + + Iterators + + The class supports a forward iterator (\ref iterator and \ref const_iterator). + The iteration is ordered. + The iterator object is thread-safe: the element pointed by the iterator object is guarded, + so, the element cannot be reclaimed while the iterator object is alive. + However, passing an iterator object between threads is dangerous. 
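+
+        A minimal single-threaded iteration sketch (for illustration only; the set type below is an assumption):
+        \code
+        typedef cds::container::SkipListSet< cds::gc::HP, int > hp_set;
+        hp_set theSet;
+        // ... (the HP garbage collector must be initialized with enough guards, see the warning above)
+        for ( hp_set::iterator it = theSet.begin(); it != theSet.end(); ++it ) {
+            // the element *it is protected by hazard pointers while the iterator is alive
+        }
+        \endcode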
+ + \warning Due to concurrent nature of skip-list set it is not guarantee that you can iterate + all elements in the set: any concurrent deletion can exclude the element + pointed by the iterator from the set, and your iteration can be terminated + before end of the set. Therefore, such iteration is more suitable for debugging purpose only + + Remember, each iterator object requires 2 additional hazard pointers, that may be + a limited resource for \p GC like as gc::HP and gc::HRC (for gc::PTB the count of + guards is unlimited). + + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced. + + */ + template < + typename GC, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = skip_list::type_traits +#else + typename Traits +#endif + > + class SkipListSet: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::SkipListSet< GC, T, Traits > +#else + protected details::make_skip_list_set< GC, T, Traits >::type +#endif + { + //@cond + typedef details::make_skip_list_set< GC, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef T value_type ; ///< @anchor cds_containewr_SkipListSet_value_type Value type stored in the set + typedef Traits options ; ///< Options specified + + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator allocator_type ; ///< Allocator type used for allocate/deallocate the skip-list nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator ; ///< key comparison functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename options::random_level_generator random_level_generator ; ///< random level generator + typedef typename options::stat stat ; ///< internal statistics type + + protected: + //@cond + typedef typename maker::node_type node_type; + typedef typename maker::node_allocator node_allocator; + + typedef std::unique_ptr< node_type, typename maker::node_deallocator > scoped_node_ptr; + //@endcond + + public: + /// Guarded pointer + typedef cds::gc::guarded_ptr< gc, node_type, value_type, details::guarded_ptr_cast_set > guarded_ptr; + + protected: + //@cond + unsigned int random_level() + { + return base_class::random_level(); + } + //@endcond + + protected: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + template + struct insert_functor + { + Func m_func; + + insert_functor ( Func f ) + : m_func(f) + {} + + void operator()( node_type& node ) + { + cds::unref(m_func)( node.m_Value ); + } + }; + + template + struct ensure_functor + { + Func m_func; + Q const& m_arg; + + ensure_functor( Q const& arg, Func f ) + : m_func(f) + , m_arg( arg ) + {} + + void operator ()( bool bNew, node_type& node, node_type& ) + { + cds::unref(m_func)( bNew, node.m_Value, m_arg ); + } + }; + + template + struct find_functor + { + Func m_func; + + find_functor( Func f ) + : m_func(f) + {} + + template + void operator ()( node_type& node, Q& val ) + { + cds::unref(m_func)( node.m_Value, val ); + } + }; + + struct copy_value_functor { + template + void operator()( Q& dest, value_type const& src ) const + { + dest = src; + } + }; + + template + struct erase_functor + { + Func m_func; + + erase_functor( Func f ) + : m_func(f) + {} + + void operator()( node_type const& node ) + { + cds::unref(m_func)( node.m_Value ); + } + }; +# endif // ifndef CDS_CXX11_LAMBDA_SUPPORT + + //@endcond + + public: + /// Default ctor + SkipListSet() + : base_class() + {} + + /// Destructor destroys the set object + ~SkipListSet() + {} + + public: + /// Iterator type + typedef skip_list::details::iterator< typename base_class::iterator > iterator; + + /// Const iterator type + typedef skip_list::details::iterator< typename base_class::const_iterator > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + iterator begin() + { + return iterator( base_class::begin() ); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return const_iterator( base_class::begin() ); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() + { + return const_iterator( base_class::cbegin() ); + } + + /// Returns a forward iterator that addresses the location succeeding the last element in a set. + iterator end() + { + return iterator( base_class::end() ); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator end() const + { + return const_iterator( base_class::end() ); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator cend() + { + return const_iterator( base_class::cend() ); + } + + public: + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain as minimum the complete key for the node. + The object of \ref value_type should be constructible from a value of type \p Q. 
+ In trivial case, \p Q is equal to \ref value_type. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( Q const& val ) + { + scoped_node_ptr sp( node_allocator().New( random_level(), val )); + if ( base_class::insert( *sp.get() )) { + sp.release(); + return true; + } + return false; + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-fields of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success. It may be passed by reference + using boost::ref + */ + template + bool insert( Q const& val, Func f ) + { + scoped_node_ptr sp( node_allocator().New( random_level(), val )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( base_class::insert( *sp.get(), [&f]( node_type& val ) { cds::unref(f)( val.m_Value ); } )) +# else + insert_functor wrapper(f); + if ( base_class::insert( *sp, cds::ref(wrapper) )) +# endif + { + sp.release(); + return true; + } + return false; + } + + /// Ensures that the item exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p val key not found in the set, then the new item created from \p val + is inserted into the set. Otherwise, the functor \p func is called with the item found. + The functor \p Func should be a function with signature: + \code + void func( bool bNew, value_type& item, const Q& val ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p key passed into the \p ensure function + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the set. + */ + template + std::pair ensure( const Q& val, Func func ) + { + scoped_node_ptr sp( node_allocator().New( random_level(), val )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair bRes = base_class::ensure( *sp, + [&func, &val](bool bNew, node_type& node, node_type&){ cds::unref(func)( bNew, node.m_Value, val ); }); +# else + ensure_functor wrapper( val, func ); + std::pair bRes = base_class::ensure( *sp, cds::ref(wrapper)); +# endif + if ( bRes.first && bRes.second ) + sp.release(); + return bRes; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref cds_containewr_SkipListSet_value_type "value_type" constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + @note This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr sp( node_allocator().New( random_level(), std::forward(args)... 
)); + if ( base_class::insert( *sp.get() )) { + sp.release(); + return true; + } + return false; + } +# endif + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_SkipListSet_erase_val + + The set item comparator should be able to compare the type \p value_type + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred ) + { + return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >() ); + } + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_SkipListSet_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type const& val); + }; + \endcode + The functor may be passed by reference using boost:ref + + Since the key of \p value_type is not explicitly specified, + template parameter \p Q defines the key type to search in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + + See also: \ref erase + */ + template + bool erase( Q const& key, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase( key, [&f]( node_type const& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor wrapper(f); + return base_class::erase( key, cds::ref(wrapper)); +# endif + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), + [&f]( node_type const& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor wrapper(f); + return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), + cds::ref(wrapper)); +# endif + } + + /// Extracts the item from the set with specified \p key + /** \anchor cds_nonintrusive_SkipListSet_hp_extract + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns it in \p result parameter. + If the item with key equal to \p key is not found the function returns \p false. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + The item extracted is freed automatically by garbage collector \p GC + when returned \ref guarded_ptr object will be destroyed or released. 
+ @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp; + if ( theList.extract( gp, 5 ) ) { + // Deal with gp + // ... + } + // Destructor of gp releases internal HP guard and frees the pointer + } + \endcode + */ + template + bool extract( guarded_ptr& result, Q const& key ) + { + return base_class::extract_( result.guard(), key, typename base_class::key_comparator() ); + } + + /// Extracts the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_hp_extract "extract(Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool extract_with( guarded_ptr& ptr, Q const& key, Less pred ) + { + typedef cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor > wrapped_less; + return base_class::extract_( ptr.guard(), key, cds::opt::details::make_comparator_from_less() ); + } + + /// Extracts an item with minimal key from the set + /** + The function searches an item with minimal key, unlinks it, and returns the item found in \p result parameter. + If the skip-list is empty the function returns \p false. + + The item extracted is freed automatically by garbage collector \p GC + when returned \ref guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::continer::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp; + if ( theList.extract_min( gp )) { + // Deal with gp + //... + } + // Destructor of gp releases internal HP guard and then frees the pointer + } + \endcode + */ + bool extract_min( guarded_ptr& result) + { + return base_class::extract_min_( result.guard() ); + } + + /// Extracts an item with maximal key from the set + /** + The function searches an item with maximal key, unlinks it, and returns the pointer to item found in \p result parameter. + If the skip-list is empty the function returns \p false. + + The item found is freed by garbage collector \p GC automatically + when returned \ref guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp; + if ( theList.extract_max( gp )) { + // Deal with gp + //... + } + // Destructor of gp releases internal HP guard and then frees the pointer + } + \endcode + */ + bool extract_max( guarded_ptr& result ) + { + return base_class::extract_max_( result.guard() ); + } + + /// Find the key \p val + /** \anchor cds_nonintrusive_SkipListSet_find_func + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. 
+ + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( val, [&f]( node_type& node, Q& v ) { cds::unref(f)( node.m_Value, v ); }); +# else + find_functor wrapper(f); + return base_class::find( val, cds::ref(wrapper)); +# endif + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( val, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), + [&f]( node_type& node, Q& v ) { cds::unref(f)( node.m_Value, v ); } ); +# else + find_functor wrapper(f); + return base_class::find_with( val, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), cds::ref(wrapper)); +# endif + } + + /// Find the key \p val + /** \anchor cds_nonintrusive_SkipListSet_find_cfunc + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( val, [&f]( node_type& node, Q const& v ) { cds::unref(f)( node.m_Value, v ); }); +# else + find_functor wrapper(f); + return base_class::find( val, cds::ref(wrapper)); +# endif + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. 
+ \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less cmp, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( val, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), + [&f]( node_type& node, Q const& v ) { cds::unref(f)( node.m_Value, v ); } ); +# else + find_functor wrapper(f); + return base_class::find_with( val, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), + cds::ref(wrapper)); +# endif + } + + /// Find the key \p val + /** \anchor cds_nonintrusive_SkipListSet_find_val + + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \ref value_type. + */ + template + bool find( Q const& val ) + { + return base_class::find( val ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred ) + { + return base_class::find_with( val, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >()); + } + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_SkipListSet_hp_get + The function searches the item with key equal to \p key + and assigns the item found to guarded pointer \p result. + The function returns \p true if \p key is found, and \p false otherwise. + If \p key is not found the \p result parameter is left unchanged. + + It is safe when a concurrent thread erases the item returned in \p result guarded pointer. + In this case the item will be freed later by garbage collector \p GC automatically + when \p guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp; + if ( theList.get( gp, 5 ) ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool get( guarded_ptr& result, Q const& key ) + { + return base_class::get_with_( result.guard(), key, typename base_class::key_comparator() ); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_hp_get "get( guarded_ptr&, Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. 
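+
+            For example (a sketch; \p foo and \p my_traits are borrowed from the example above, while \p foo_less and the \p key member are assumptions):
+            \code
+            // Compares the stored foo value (assumed to have an integer key member)
+            // with an int search key, in both argument orders
+            struct foo_less {
+                bool operator()( foo const& v, int k ) const { return v.key < k; }
+                bool operator()( int k, foo const& v ) const { return k < v.key; }
+            };
+
+            typedef cds::container::SkipListSet< cds::gc::HP, foo, my_traits > skip_list;
+            skip_list theList;
+            // ...
+            {
+                skip_list::guarded_ptr gp;
+                if ( theList.get_with( gp, 5, foo_less() )) {
+                    // Deal with gp
+                    // ...
+                }
+                // Destructor of guarded_ptr releases the internal HP guard
+            }
+            \endcode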
+ */ + template + bool get_with( guarded_ptr& result, Q const& key, Less pred ) + { + typedef cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor > wrapped_less; + return base_class::get_with_( result.guard(), key, cds::opt::details::make_comparator_from_less< wrapped_less >()); + } + + /// Clears the set (non-atomic). + /** + The function deletes all items from the set. + The function is not atomic, thus, in multi-threaded environment with parallel insertions + this sequence + \code + set.clear(); + assert( set.empty() ); + \endcode + the assertion could be raised. + + For each item the \ref disposer provided by \p Traits template parameter will be called. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the set is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + /** + The value returned depends on item counter type provided by \p Traits template parameter. + If it is atomicity::empty_item_counter this function always returns 0. + Therefore, the function is not suitable for checking the set emptiness, use \ref empty + member function for this purpose. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_SKIP_LIST_SET_IMPL_H diff --git a/cds/container/skip_list_set_nogc.h b/cds/container/skip_list_set_nogc.h new file mode 100644 index 00000000..0dcee9e0 --- /dev/null +++ b/cds/container/skip_list_set_nogc.h @@ -0,0 +1,423 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SKIP_LIST_SET_NOGC_H +#define __CDS_CONTAINER_SKIP_LIST_SET_NOGC_H + +#include +#include +#include +#include +#include + +namespace cds { namespace container { + //@cond + namespace skip_list { namespace details { + struct set_key_accessor + { + template + typename NodeType::stored_value_type const& operator()( NodeType const& node ) const + { + return node.m_Value; + } + }; + }} // namespace skip_list::details + + namespace details { + template + struct make_skip_list_set_nogc + { + typedef cds::gc::nogc gc; + typedef T value_type; + typedef Traits type_traits; + + typedef cds::intrusive::skip_list::node< gc > intrusive_node_type; + struct node_type: public intrusive_node_type + { + typedef intrusive_node_type base_class; + typedef typename base_class::atomic_ptr atomic_ptr; + typedef atomic_ptr tower_item_type; + typedef value_type stored_value_type; + + value_type m_Value; + //atomic_ptr m_arrTower[] ; // allocated together with node_type in single memory block + + template + node_type( unsigned int nHeight, atomic_ptr * pTower, Q const& v ) + : m_Value(v) + { + if ( nHeight > 1 ) { + new (pTower) atomic_ptr[ nHeight - 1 ]; + base_class::make_tower( nHeight, pTower ); + } + } + +# ifdef CDS_EMPLACE_SUPPORT + template + node_type( unsigned int nHeight, atomic_ptr * pTower, Q&& q, Args&&... args ) + : m_Value( std::forward(q), std::forward(args)... 
) + { + if ( nHeight > 1 ) { + new (pTower) atomic_ptr[ nHeight - 1 ]; + base_class::make_tower( nHeight, pTower ); + } + } +# endif + + private: + node_type() ; // no default ctor + }; + + typedef skip_list::details::node_allocator< node_type, type_traits> node_allocator; + + struct node_deallocator { + void operator ()( node_type * pNode ) + { + node_allocator().Delete( pNode ); + } + }; + + typedef skip_list::details::dummy_node_builder dummy_node_builder; + + typedef typename type_traits::key_accessor key_accessor; + typedef typename opt::details::make_comparator< value_type, type_traits >::type key_comparator; + + /* + template + struct less_wrapper { + typedef compare_wrapper< node_type, cds::opt::details::make_comparator_from_less, key_accessor > type; + }; + */ + + typedef typename cds::intrusive::skip_list::make_traits< + cds::opt::type_traits< type_traits > + ,cds::intrusive::opt::hook< intrusive::skip_list::base_hook< cds::opt::gc< gc > > > + ,cds::intrusive::opt::disposer< node_deallocator > + ,cds::intrusive::skip_list::internal_node_builder< dummy_node_builder > + ,cds::opt::compare< cds::details::compare_wrapper< node_type, key_comparator, key_accessor > > + >::type intrusive_type_traits; + + typedef cds::intrusive::SkipListSet< gc, node_type, intrusive_type_traits> type; + }; + } // namespace details + //@endcond + + /// Lock-free skip-list set (template specialization for gc::nogc) + /** @ingroup cds_nonintrusive_set + \anchor cds_nonintrusive_SkipListSet_nogc + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of list item. + See \ref cds_nonintrusive_SkipListSet_hp "SkipListSet" for detailed description. + + Template arguments: + - \p T - type to be stored in the list. + - \p Traits - type traits. See skip_list::type_traits for explanation. + + It is possible to declare option-based list with cds::container::skip_list::make_traits metafunction istead of \p Traits template + argument. \p Options template arguments of cds::container::skip_list::make_traits metafunction are: + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - skip_list::random_level_generator - random level generator. Can be skip_list::xorshift, skip_list::turbo_pascal or + user-provided one. See skip_list::random_level_generator option description for explanation. + Default is \p %skip_list::turbo_pascal. + - opt::allocator - allocator for skip-list node. Default is \ref CDS_DEFAULT_ALLOCATOR. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::Default is used. + - opt::stat - internal statistics. 
Available types: skip_list::stat, skip_list::empty_stat (the default) + */ + template < + typename T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = skip_list::type_traits +#else + class Traits +#endif + > + class SkipListSet< gc::nogc, T, Traits >: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::SkipListSet< cds::gc::nogc, T, Traits > +#else + protected details::make_skip_list_set_nogc< T, typename cds::opt::replace_key_accessor< Traits, skip_list::details::set_key_accessor >::type >::type +#endif + { + //@cond + typedef details::make_skip_list_set_nogc< T, typename cds::opt::replace_key_accessor< Traits, skip_list::details::set_key_accessor >::type > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef T value_type ; ///< Value type stored in the set + typedef Traits options ; ///< Options specified + + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator allocator_type ; ///< Allocator type used for allocate/deallocate the skip-list nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator ; ///< key compare functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + typedef typename options::stat stat ; ///< internal statistics type + typedef typename base_class::random_level_generator random_level_generator ; ///< random level generator + + protected: + //@cond + typedef typename maker::node_type node_type; + typedef typename maker::node_allocator node_allocator; + typedef typename std::conditional< + std::is_same< typename options::key_accessor, opt::none >::value, + skip_list::details::set_key_accessor, + typename options::key_accessor + >::type key_accessor; + + typedef std::unique_ptr< node_type, typename maker::node_deallocator > scoped_node_ptr; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct ensure_functor + { + node_type * pNode; + void operator ()( bool bNew, node_type& node, node_type& ) + { + pNode = &node; + } + }; + + struct find_functor + { + node_type * pNode; + + template + void operator ()( node_type& node, Q& ) + { + pNode = &node; + } + }; +# endif + //@endcond + + public: + /// Iterator type + typedef skip_list::details::iterator< typename base_class::iterator > iterator; + + /// Const iterator type + typedef skip_list::details::iterator< typename base_class::const_iterator > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + iterator begin() + { + return iterator( base_class::begin() ); + } + + /// Returns a forward const iterator addressing the first element in a set + //@{ + const_iterator begin() const + { + return const_iterator( base_class::begin() ); + } + const_iterator cbegin() + { + return const_iterator( base_class::cbegin() ); + } + //@} + + /// Returns a forward iterator that addresses the location succeeding the last element in a set. + iterator end() + { + return iterator( base_class::end() ); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. 
+ //@{ + const_iterator end() const + { + return const_iterator( base_class::end() ); + } + const_iterator cend() + { + return const_iterator( base_class::cend() ); + } + //@} + + protected: + //@cond + static iterator node_to_iterator( node_type * pNode ) + { + assert( pNode ); + return iterator( base_class::iterator::from_node( pNode )); + } + //@endcond + + public: + /// Default ctor + SkipListSet() + : base_class() + {} + + /// Destructor destroys the set object + ~SkipListSet() + {} + + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Return an iterator pointing to inserted item if success, otherwise \ref end() + */ + template + iterator insert( const Q& val ) + { + scoped_node_ptr sp( node_allocator().New( base_class::random_level(), val )); + if ( base_class::insert( *sp.get() )) { + return node_to_iterator( sp.release() ); + } + return end(); + } + +#ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Return an iterator pointing to inserted item if success \ref end() otherwise + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + iterator emplace( Args&&... args ) + { + scoped_node_ptr sp( node_allocator().New( base_class::random_level(), std::forward(args)... )); + if ( base_class::insert( *sp.get() )) { + return node_to_iterator( sp.release() ); + } + return end(); + } +#endif + + /// Ensures that the item \p val exists in the set + /** + The operation inserts new item if the key \p val is not found in the set. + Otherwise, the function returns an iterator that points to item found. + + Returns std::pair where \p first is an iterator pointing to + item found or inserted, \p second is true if new item has been added or \p false if the item + already is in the set. + */ + template + std::pair ensure( const Q& val ) + { + scoped_node_ptr sp( node_allocator().New( base_class::random_level(), val )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + node_type * pNode; + std::pair bRes = base_class::ensure( *sp, [&pNode](bool, node_type& item, node_type&) { pNode = &item; } ); + if ( bRes.first && bRes.second ) + sp.release(); + assert( pNode ); + return std::make_pair( node_to_iterator( pNode ), bRes.second ); +# else + ensure_functor f; + std::pair bRes = base_class::ensure( *sp, cds::ref(f) ); + if ( bRes.first && bRes.second ) + sp.release(); + assert( f.pNode ); + return std::make_pair( node_to_iterator( f.pNode ), bRes.second ); +# endif + } + + /// Searches \p key + /** \anchor cds_nonintrusive_SkipListSet_nogc_find_val + + The function searches the item with key equal to \p key + and returns an iterator pointed to item found if the key is found, + and \ref end() otherwise + */ + template + iterator find( Q const& key ) const + { + node_type * pNode = base_class::find( key ); + if ( pNode ) + return node_to_iterator( pNode ); + return base_class::nonconst_end(); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_nogc_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
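+
+        For illustration only, a minimal sketch (the \p my_data, \p my_less and \p my_traits types below
+        are hypothetical, they are not part of the library):
+        \code
+        struct my_data {
+            int         nKey;
+            std::string strValue;
+        };
+        // Predicate comparing my_data with an integer key, in both argument orders
+        struct my_less {
+            bool operator()( my_data const& d, int k ) const { return d.nKey < k; }
+            bool operator()( int k, my_data const& d ) const { return k < d.nKey; }
+        };
+
+        // my_traits is assumed to define a suitable opt::less or opt::compare for my_data
+        typedef cds::container::SkipListSet< cds::gc::nogc, my_data, my_traits > nogc_set;
+
+        nogc_set theSet;
+        // ... fill the set ...
+        nogc_set::iterator it = theSet.find_with( 42, my_less() );
+        if ( it != theSet.end() ) {
+            // the item with key 42 has been found
+        }
+        \endcode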
+ */ + template + iterator find_with( Q const& key, Less pred ) const + { + node_type * pNode = base_class::find_with( key, cds::details::predicate_wrapper< node_type, Less, key_accessor>() ); + if ( pNode ) + return node_to_iterator( pNode ); + return base_class::nonconst_end(); + } + + /// Gets minimum key from the set + /** + If the set is empty the function returns \p NULL + */ + value_type * get_min() const + { + node_type * pNode = base_class::get_min(); + return pNode ? &pNode->m_Value : null_ptr(); + } + + /// Gets maximum key from the set + /** + The function returns \p NULL if the set is empty + */ + value_type * get_max() const + { + node_type * pNode = base_class::get_max(); + return pNode ? &pNode->m_Value : null_ptr(); + } + + /// Clears the set (non-atomic) + /** + The function is not atomic. + Finding and/or inserting is prohibited while clearing. + Otherwise an unpredictable result may be encountered. + Thus, \p clear() may be used only for debugging purposes. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the set is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + /** + The value returned depends on item counter type provided by \p Traits template parameter. + If it is atomicity::empty_item_counter this function always returns 0. + The function is not suitable for checking the set emptiness, use \ref empty + member function for this purpose. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32. + static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT + { + return base_class::max_height(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + }; + +}} // cds::container + +#endif // ifndef __CDS_CONTAINER_SKIP_LIST_SET_NOGC_H diff --git a/cds/container/skip_list_set_ptb.h b/cds/container/skip_list_set_ptb.h new file mode 100644 index 00000000..502bbcc8 --- /dev/null +++ b/cds/container/skip_list_set_ptb.h @@ -0,0 +1,11 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SKIP_LIST_MAP_PTB_H +#define __CDS_CONTAINER_SKIP_LIST_MAP_PTB_H + +#include +#include +#include +#include + +#endif // #ifndef __CDS_CONTAINER_SKIP_LIST_MAP_PTB_H diff --git a/cds/container/skip_list_set_rcu.h b/cds/container/skip_list_set_rcu.h new file mode 100644 index 00000000..a94aba44 --- /dev/null +++ b/cds/container/skip_list_set_rcu.h @@ -0,0 +1,883 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SKIP_LIST_SET_RCU_H +#define __CDS_CONTAINER_SKIP_LIST_SET_RCU_H + +#include +#include + +namespace cds { namespace container { + + /// Lock-free skip-list set (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_nonintrusive_set + \anchor cds_nonintrusive_SkipListSet_rcu + + The implementation of well-known probabilistic data structure called skip-list + invented by W.Pugh in his papers: + - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees + - [1990] W.Pugh A Skip List Cookbook + + A skip-list is a probabilistic data structure that provides expected logarithmic + time search without the need of rebalance. The skip-list is a collection of sorted + linked list. Nodes are ordered by key. Each node is linked into a subset of the lists. + Each list has a level, ranging from 0 to 32. 
The bottom-level list contains + all the nodes, and each higher-level list is a sublist of the lower-level lists. + Each node is created with a random top level (with a random height), and belongs + to all lists up to that level. The probability that a node has the height 1 is 1/2. + The probability that a node has the height N is 1/2 ** N (more precisely, + the distribution depends on an random generator provided, but our generators + have this property). + + The lock-free variant of skip-list is implemented according to book + - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming", + chapter 14.4 "A Lock-Free Concurrent Skiplist" + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type". + - \p T - type to be stored in the list. + - \p Traits - type traits. See skip_list::type_traits for explanation. + + It is possible to declare option-based list with cds::container::skip_list::make_traits metafunction istead of \p Traits template + argument. + Template argument list \p Options of cds::container::skip_list::make_traits metafunction are: + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - skip_list::random_level_generator - random level generator. Can be skip_list::xorshift, skip_list::turbo_pascal or + user-provided one. See skip_list::random_level_generator option description for explanation. + Default is \p %skip_list::turbo_pascal. + - opt::allocator - allocator for skip-list node. Default is \ref CDS_DEFAULT_ALLOCATOR. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::Default is used. + - opt::stat - internal statistics. Available types: skip_list::stat, skip_list::empty_stat (the default) + - opt::rcu_check_deadlock - a deadlock checking policy. Default is opt::v::rcu_throw_deadlock + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + + Iterators + + The class supports a forward iterator (\ref iterator and \ref const_iterator). + The iteration is ordered. + + You may iterate over skip-list set items only under RCU lock. + Only in this case the iterator is thread-safe since + while RCU is locked any set's item cannot be reclaimed. + + The requirement of RCU lock during iterating means that deletion of the elements (i.e. \ref erase) + is not possible. + + @warning The iterator object cannot be passed between threads + + Example how to use skip-list set iterators: + \code + // First, you should include the header for RCU type you have chosen + #include + #include + + typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type; + + struct Foo { + // ... + }; + + // Traits for your skip-list. + // At least, you should define cds::opt::less or cds::opt::compare for Foo struct + struct my_traits: public cds::continer::skip_list::type_traits + { + // ... + }; + typedef cds::container::SkipListSet< rcu_type, Foo, my_traits > my_skiplist_set; + + my_skiplist_set theSet; + + // ... 
+ + // Begin iteration + { + // Apply RCU locking manually + typename rcu_type::scoped_lock sl; + + for ( auto it = theList.begin(); it != theList.end(); ++it ) { + // ... + } + + // rcu_type::scoped_lock destructor releases RCU lock implicitly + } + \endcode + + \warning Due to concurrent nature of skip-list set it is not guarantee that you can iterate + all elements in the set: any concurrent deletion can exclude the element + pointed by the iterator from the set, and your iteration can be terminated + before end of the set. Therefore, such iteration is more suitable for debugging purposes + + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced. + */ + template < + typename RCU, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = skip_list::type_traits +#else + typename Traits +#endif + > + class SkipListSet< cds::urcu::gc< RCU >, T, Traits >: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::SkipListSet< cds::urcu::gc< RCU >, T, Traits > +#else + protected details::make_skip_list_set< cds::urcu::gc< RCU >, T, Traits >::type +#endif + { + //@cond + typedef details::make_skip_list_set< cds::urcu::gc< RCU >, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef T value_type ; ///< Value type stored in the set + typedef Traits options ; ///< Options specified + + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator allocator_type ; ///< Allocator type used for allocate/deallocate the skip-list nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator ; ///< key compare functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + typedef typename options::random_level_generator random_level_generator ; ///< random level generator + typedef typename options::stat stat ; ///< internal statistics type + typedef typename options::rcu_check_deadlock rcu_check_deadlock ; ///< Deadlock checking policy + + protected: + //@cond + typedef typename maker::node_type node_type; + typedef typename maker::node_allocator node_allocator; + + typedef std::unique_ptr< node_type, typename maker::node_deallocator > scoped_node_ptr; + //@endcond + + public: + typedef typename base_class::rcu_lock rcu_lock; ///< RCU scoped lock + /// Group of \p extract_xxx functions do not require external locking + static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; + + /// pointer to extracted node + typedef cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::intrusive_type_traits::disposer > exempt_ptr; + + protected: + //@cond + unsigned int random_level() + { + return base_class::random_level(); + } + + value_type * to_value_ptr( node_type * pNode ) const CDS_NOEXCEPT + { + return pNode ? 
&pNode->m_Value : null_ptr(); + } + //@endcond + + protected: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + template + struct insert_functor + { + Func m_func; + + insert_functor ( Func f ) + : m_func(f) + {} + + void operator()( node_type& node ) + { + cds::unref(m_func)( node.m_Value ); + } + }; + + template + struct ensure_functor + { + Func m_func; + Q const& m_arg; + + ensure_functor( Q const& arg, Func f ) + : m_func(f) + , m_arg( arg ) + {} + + void operator ()( bool bNew, node_type& node, node_type& ) + { + cds::unref(m_func)( bNew, node.m_Value, m_arg ); + } + }; + + template + struct find_functor + { + Func m_func; + + find_functor( Func f ) + : m_func(f) + {} + + template + void operator ()( node_type& node, Q& val ) + { + cds::unref(m_func)( node.m_Value, val ); + } + }; + + template + struct erase_functor + { + Func m_func; + + erase_functor( Func f ) + : m_func(f) + {} + + void operator()( node_type const& node ) + { + cds::unref(m_func)( node.m_Value ); + } + }; + + template + struct extract_copy_wrapper + { + Func m_func; + extract_copy_wrapper( Func f ) + : m_func(f) + {} + + template + void operator()( Q& dest, node_type& src ) + { + cds::unref(m_func)(dest, src.m_Value); + } + }; + + struct extract_assign_wrapper + { + template + void operator()( Q& dest, node_type& src ) const + { + dest = src.m_Value; + } + }; +# endif // ifndef CDS_CXX11_LAMBDA_SUPPORT + + //@endcond + + public: + /// Default ctor + SkipListSet() + : base_class() + {} + + /// Destructor destroys the set object + ~SkipListSet() + {} + + public: + /// Iterator type + typedef skip_list::details::iterator< typename base_class::iterator > iterator; + + /// Const iterator type + typedef skip_list::details::iterator< typename base_class::const_iterator > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + iterator begin() + { + return iterator( base_class::begin() ); + } + + /// Returns a forward const iterator addressing the first element in a set + //@{ + const_iterator begin() const + { + return const_iterator( base_class::begin() ); + } + const_iterator cbegin() + { + return const_iterator( base_class::cbegin() ); + } + //@} + + /// Returns a forward iterator that addresses the location succeeding the last element in a set. + iterator end() + { + return iterator( base_class::end() ); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + //@{ + const_iterator end() const + { + return const_iterator( base_class::end() ); + } + const_iterator cend() + { + return const_iterator( base_class::cend() ); + } + //@} + + public: + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain as minimum the complete key for the node. + The object of \ref value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns \p true if \p val is inserted into the set, \p false otherwise. 
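+
+        A minimal sketch (for illustration only; \p my_traits is a hypothetical traits struct
+        that defines a suitable \p opt::less or \p opt::compare for \p int):
+        \code
+        // First, include the header of the RCU type chosen, see \ref cds_urcu_gc
+        typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
+        typedef cds::container::SkipListSet< rcu_type, int, my_traits > int_set;
+
+        int_set theSet;
+        bool bOk = theSet.insert( 10 );    // true - the key is new
+        bOk = theSet.insert( 10 );         // false - the key is already in the set
+        \endcode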
+ */ + template + bool insert( Q const& val ) + { + scoped_node_ptr sp( node_allocator().New( random_level(), val )); + if ( base_class::insert( *sp.get() )) { + sp.release(); + return true; + } + return false; + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-fields of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success. It may be passed by reference + using boost::ref + + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool insert( Q const& val, Func f ) + { + scoped_node_ptr sp( node_allocator().New( random_level(), val )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( base_class::insert( *sp.get(), [&f]( node_type& val ) { cds::unref(f)( val.m_Value ); } )) +# else + insert_functor wrapper(f); + if ( base_class::insert( *sp, cds::ref(wrapper) )) +# endif + { + sp.release(); + return true; + } + return false; + } + + /// Ensures that the item exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p val key not found in the set, then the new item created from \p val + is inserted into the set. Otherwise, the functor \p func is called with the item found. + The functor \p Func should be a function with signature: + \code + void func( bool bNew, value_type& item, const Q& val ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p key passed into the \p ensure function + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the set. + */ + template + std::pair ensure( const Q& val, Func func ) + { + scoped_node_ptr sp( node_allocator().New( random_level(), val )); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair bRes = base_class::ensure( *sp, + [&func, &val](bool bNew, node_type& node, node_type&){ cds::unref(func)( bNew, node.m_Value, val ); }); +# else + ensure_functor wrapper( val, func ); + std::pair bRes = base_class::ensure( *sp, cds::ref(wrapper)); +# endif + if ( bRes.first && bRes.second ) + sp.release(); + return bRes; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + RCU \p synchronize method can be called. RCU should not be locked. + + @note This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... 
args ) + { + scoped_node_ptr sp( node_allocator().New( random_level(), std::forward(args)... )); + if ( base_class::insert( *sp.get() )) { + sp.release(); + return true; + } + return false; + } +# endif + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_SkipListSet_rcu_erase_val + + The item comparator should be able to compare the type \p value_type + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_rcu_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred ) + { + return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >()); + } + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_SkipListSet_rcu_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type const& val); + }; + \endcode + The functor may be passed by reference using boost:ref + + Since the key of MichaelHashSet's \p value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + + See also: \ref erase + */ + template + bool erase( Q const& key, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase( key, [&f]( node_type const& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor wrapper(f); + return base_class::erase( key, cds::ref(wrapper)); +# endif + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), + [&f]( node_type const& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor wrapper(f); + return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), cds::ref(wrapper)); +# endif + } + + /// Extracts the item from the set with specified \p key + /** \anchor cds_nonintrusive_SkipListSet_rcu_extract + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns it in \p result parameter. + If the item with key equal to \p key is not found the function returns \p false. 
+ + Note the compare functor from \p Traits class' template argument + should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item found. + The item will be implicitly freed when \p result object is destroyed or when + result.release() is called, see \p cds::urcu::exempt_ptr for explanation. + @note Before reusing \p result object you should call its \p release() method. + */ + template + bool extract( exempt_ptr& result, Q const& key ) + { + return base_class::do_extract( result, key ); + } + + /// Extracts the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_rcu_extract "extract(exempt_ptr&, Q const&)" + but \p pred predicate is used for key comparing. + \p Less has the semantics like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool extract_with( exempt_ptr& result, Q const& key, Less pred ) + { + return base_class::do_extract_with( result, key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >()); + } + + /// Extracts an item with minimal key from the set + /** + The function searches an item with minimal key, unlinks it, and returns the item found in \p result parameter. + If the skip-list is empty the function returns \p false. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item found. + The item will be implicitly freed when \p result object is destroyed or when + result.release() is called, see cds::urcu::exempt_ptr for explanation. + @note Before reusing \p result object you should call its \p release() method. + */ + bool extract_min( exempt_ptr& result ) + { + return base_class::do_extract_min(result); + } + + /// Extracts an item with maximal key from the set + /** + The function searches an item with maximal key, unlinks it from the set, and returns the item found + in \p result parameter. If the skip-list is empty the function returns \p false. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item found. + The item will be implicitly freed when \p result object is destroyed or when + result.release() is called, see cds::urcu::exempt_ptr for explanation. + @note Before reusing \p result object you should call its \p release() method. + */ + bool extract_max(exempt_ptr& result) + { + return base_class::do_extract_max(result); + } + + /// Find the key \p val + /** + @anchor cds_nonintrusive_SkipListSet_rcu_find_func + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. 
+ + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \p value_type. + + The function applies RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( val, [&f]( node_type& node, Q& v ) { cds::unref(f)( node.m_Value, v ); }); +# else + find_functor wrapper(f); + return base_class::find( val, cds::ref(wrapper)); +# endif + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_rcu_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( val, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), + [&f]( node_type& node, Q& v ) { cds::unref(f)( node.m_Value, v ); } ); +# else + find_functor wrapper(f); + return base_class::find_with( val, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), + cds::ref(wrapper)); +# endif + } + + /// Find the key \p val + /** @anchor cds_nonintrusive_SkipListSet_rcu_find_cfunc + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \p value_type. + + The function applies RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( val, [&f]( node_type& node, Q const& v ) { cds::unref(f)( node.m_Value, v ); }); +# else + find_functor wrapper(f); + return base_class::find( val, cds::ref(wrapper)); +# endif + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_rcu_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the semantics like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
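+
+        For illustration only (the \p item_type, \p less_by_key and \p item_traits names below are
+        hypothetical and are not part of the library):
+        \code
+        struct item_type {
+            int         nKey;
+            std::string strData;
+        };
+        // Predicate comparing item_type with an integer key, in both argument orders
+        struct less_by_key {
+            bool operator()( item_type const& i, int k ) const { return i.nKey < k; }
+            bool operator()( int k, item_type const& i ) const { return k < i.nKey; }
+        };
+
+        typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
+        typedef cds::container::SkipListSet< rcu_type, item_type, item_traits > rcu_set;
+
+        rcu_set theSet;
+        // ...
+        bool bFound = theSet.find_with( 42, less_by_key(),
+            []( item_type& item, int const& key ) {
+                // item is the element found; only its non-key fields may be changed here
+            });
+        \endcode
+        (The lambda is shown for brevity; for compilers without lambda support a functor passed
+        via \p cds::ref can be used instead.)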
+ */ + template + bool find_with( Q const& val, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( val, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), + [&f]( node_type& node, Q const& v ) { cds::unref(f)( node.m_Value, v ); } ); +# else + find_functor wrapper(f); + return base_class::find_with( val, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), + cds::ref(wrapper)); +# endif + } + + /// Find the key \p val + /** @anchor cds_nonintrusive_SkipListSet_rcu_find_val + + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \ref value_type. + + The function applies RCU lock internally. + */ + template + bool find( Q const & val ) + { + return base_class::find( val ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_rcu_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred ) + { + return base_class::find_with( val, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >()); + } + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_SkipListSet_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns \p NULL. + + Note the compare functor in \p Traits class' template argument + should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::container::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; + skip_list theList; + // ... + { + // Lock RCU + skip_list::rcu_lock lock; + + foo * pVal = theList.get( 5 ); + // Deal with pVal + //... + + // Unlock RCU by rcu_lock destructor + // pVal can be freed at any time after RCU unlocking + } + \endcode + + After RCU unlocking the \p %force_dispose member function can be called manually, + see \ref force_dispose for explanation. + */ + template + value_type * get( Q const& key ) + { + return to_value_ptr( base_class::get( key )); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + value_type * get_with( Q const& val, Less pred ) + { + return to_value_ptr( base_class::get_with( val, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >() )); + } + + /// Clears the set (non-atomic). + /** + The function deletes all items from the set. 
+ The function is not atomic, thus, in multi-threaded environment with parallel insertions + this sequence + \code + set.clear(); + assert( set.empty() ); + \endcode + the assertion could be raised. + + For each item the \ref disposer provided by \p Traits template parameter will be called. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the set is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + /** + The value returned depends on item counter type provided by \p Traits template parameter. + If it is atomicity::empty_item_counter this function always returns 0. + Therefore, the function is not suitable for checking the set emptiness, use \ref empty + member function for this purpose. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Clears internal list of ready-to-delete items passing them to RCU reclamation cycle + /** + See \ref cds_intrusive_SkipListSet_rcu_force_dispose "intrusive SkipListSet" for explanation + */ + void force_dispose() + { + return base_class::force_dispose(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_SKIP_LIST_SET_RCU_H diff --git a/cds/container/split_list_base.h b/cds/container/split_list_base.h new file mode 100644 index 00000000..18c96ebe --- /dev/null +++ b/cds/container/split_list_base.h @@ -0,0 +1,176 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SPLIT_LIST_BASE_H +#define __CDS_CONTAINER_SPLIT_LIST_BASE_H + +#include + +namespace cds { namespace container { + + // forward declaration + struct michael_list_tag; + + /// SplitListSet related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace split_list { + using intrusive::split_list::dynamic_bucket_table; + + //@cond + namespace details { + + template + struct wrap_map_traits_helper { + typedef Opt key_accessor; + }; + + template + struct wrap_map_traits_helper + { + struct key_accessor + { + typedef Key key_type; + key_type const & operator()( std::pair const & val ) const + { + return val.first; + } + }; + }; + + template + struct wrap_map_traits: public Traits + { + typedef typename wrap_map_traits_helper::key_accessor key_accessor; + }; + + template + struct wrap_set_traits_helper { + typedef Opt key_accessor; + }; + + template + struct wrap_set_traits_helper + { + struct key_accessor + { + typedef Value key_type; + key_type const& operator()( Value const& val ) const + { + return val; + } + }; + }; + + template + struct wrap_set_traits: public Traits + { + typedef typename wrap_set_traits_helper::key_accessor key_accessor; + }; + } // namespace details + //@endcond + + + /// Type traits for SplitListSet class + /** + Note, the SplitListSet type traits is based on intrusive::split_list::type_traits. + Any member declared in intrusive::split_list::type_traits is also applied to + container::split_list::type_traits. + */ + struct type_traits: public intrusive::split_list::type_traits + { + // Ordered list implementation + /** + This option selects appropriate ordered-list implementation for split-list. + It may be \ref michael_list_tag or \ref lazy_list_tag. + */ + typedef michael_list_tag ordered_list; + + // Ordered list traits + /** + With this option you can specify type traits for selected ordered list class. 
+ If this option is opt::none, the ordered list traits is combined with default + ordered list traits and split-list traits. + + For \p michael_list_tag, the default traits is \ref container::michael_list::type_traits. + + For \p lazy_list_tag, the default traits is \ref container::lazy_list::type_traits. + */ + typedef opt::none ordered_list_traits; + + //@cond + typedef opt::none key_accessor; + //@endcond + }; + + /// Option to select ordered list class for split-list + /** + This option selects appropriate ordered list class for containers based on split-list. + Template parameter \p Type may be \ref michael_list_tag or \ref lazy_list_tag. + */ + template + struct ordered_list + { + //@cond + template struct pack: public Base + { + typedef Type ordered_list; + }; + //@endcond + }; + + /// Option to specify ordered list type traits + /** + The \p Type template parameter specifies ordered list type traits. + It depends on type of ordered list selected. + */ + template + struct ordered_list_traits + { + //@cond + template struct pack: public Base + { + typedef Type ordered_list_traits; + }; + //@endcond + }; + + /// Metafunction converting option list to traits struct + /** + Available \p Options: + - split_list::ordered_list - a tag for ordered list implementation. + See split_list::ordered_list for possible values. + - split_list::ordered_list_traits - type traits for ordered list implementation. + For MichaelList use container::michael_list::type_traits, + for LazyList use container::lazy_list::type_traits. + - plus any option from intrusive::split_list::make_traits + */ + template + struct make_traits { + typedef typename cds::opt::make_options< type_traits, CDS_OPTIONS8>::type type ; ///< Result of metafunction + }; + } // namespace split_list + + //@cond + // Forward declarations + template + class SplitListSet; + + template + class SplitListMap; + //@endcond + + //@cond + // Forward declaration + namespace details { + template + struct make_split_list_set; + + template + struct make_split_list_map; + } + //@endcond + +}} // namespace cds::container + + +#endif // #ifndef __CDS_CONTAINER_SPLIT_LIST_BASE_H diff --git a/cds/container/split_list_map.h b/cds/container/split_list_map.h new file mode 100644 index 00000000..d033d24c --- /dev/null +++ b/cds/container/split_list_map.h @@ -0,0 +1,660 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SPLIT_LIST_MAP_H +#define __CDS_CONTAINER_SPLIT_LIST_MAP_H + +#include +#include + +namespace cds { namespace container { + + /// Split-ordered list map + /** @ingroup cds_nonintrusive_map + \anchor cds_nonintrusive_SplitListMap_hp + + Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see + - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables" + - [2008] Nir Shavit "The Art of Multiprocessor Programming" + + See intrusive::SplitListSet for a brief description of the split-list algorithm. + + Template parameters: + - \p GC - Garbage collector used + - \p Key - key type of an item stored in the map. It should be copy-constructible + - \p Value - value type stored in the map + - \p Traits - type traits, default is split_list::type_traits. Instead of declaring split_list::type_traits -based + struct you may apply option-based notation with split_list::make_traits metafunction. + + There are the specializations: + - for \ref cds_urcu_desc "RCU" - declared in cd/container/split_list_map_rcu.h, + see \ref cds_nonintrusive_SplitListMap_rcu "SplitListMap". 
+ - for \ref cds::gc::nogc declared in cds/container/split_list_map_nogc.h, + see \ref cds_nonintrusive_SplitListMap_nogc "SplitListMap". + + \par Usage + + You should decide what garbage collector you want, and what ordered list you want to use. Split-ordered list + is original data structure based on an ordered list. Suppose, you want construct split-list map based on gc::HP GC + and MichaelList as ordered list implementation. Your map should map \p int key to std::string value. + So, you beginning your program with following include: + \code + #include + #include + + namespace cc = cds::container; + \endcode + The inclusion order is important: first, include file for ordered-list implementation (for this example, cds/container/michael_list_hp.h), + then the header for split-list map cds/container/split_list_map.h. + + Now, you should declare traits for split-list map. The main parts of traits are a hash functor for the map key and a comparing functor for ordered list. + We use std::hash as hash functor and std::less predicate as comparing functor. + + The second attention: instead of using \p %MichaelList in \p %SplitListMap traits we use a tag cds::contaner::michael_list_tag for the Michael's list. + The split-list requires significant support from underlying ordered list class and it is not good idea to dive you + into deep implementation details of split-list and ordered list interrelations. The tag paradigm simplifies split-list interface. + + \code + // SplitListMap traits + struct foo_set_traits: public cc::split_list::type_traits + { + typedef cc::michael_list_tag ordered_list ; // what type of ordered list we want to use + typedef std::hash hash ; // hash functor for the key stored in split-list map + + // Type traits for our MichaelList class + struct ordered_list_traits: public cc::michael_list::type_traits + { + typedef std::less less ; // use our std::less predicate as comparator to order list nodes + }; + }; + \endcode + + Now you are ready to declare our map class based on SplitListMap: + \code + typedef cc::SplitListMap< cds::gc::PTB, int, std::string, foo_set_traits > int_string_map; + \endcode + + You may use the modern option-based declaration instead of classic type-traits-based one: + \code + typedef cc:SplitListMap< + cs::gc::PTB // GC used + ,int // key type + ,std::string // value type + ,cc::split_list::make_traits< // metafunction to build split-list traits + cc::split_list::ordered_list // tag for underlying ordered list implementation + ,cc::opt::hash< std::hash > // hash functor + ,cc::split_list::ordered_list_traits< // ordered list traits desired + cc::michael_list::make_traits< // metafunction to build lazy list traits + cc::opt::less< std::less > // less-based compare functor + >::type + > + >::type + > int_string_map; + \endcode + In case of option-based declaration using split_list::make_traits metafunction the struct \p foo_set_traits is not required. + + Now, the map of type \p int_string_map is ready to use in your program. + + Note that in this example we show only mandatory type_traits parts, optional ones is the default and they are inherited + from cds::container::split_list::type_traits. + The cds library contains many other options for deep tuning of behavior of the split-list and + ordered-list containers. 
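+
+        Finally, a short sketch of how the declared \p int_string_map can be used (illustrative only):
+        \code
+        int_string_map theMap;
+
+        theMap.insert( 1 );                         // key 1, default-constructed value
+        theMap.insert( 2, std::string("two") );     // key 2, value "two"
+
+        // find() calls the functor for the item found (C++11 lambda shown for brevity)
+        theMap.find( 2, []( int_string_map::value_type& item ) {
+            std::cout << item.second << std::endl;  // prints "two"
+        });
+
+        theMap.erase( 1 );
+        \endcode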
+ */ + template < + class GC, + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = split_list::type_traits +#else + class Traits +#endif + > + class SplitListMap: + protected container::SplitListSet< + GC, + std::pair, + split_list::details::wrap_map_traits + > + { + //@cond + typedef container::SplitListSet< + GC, + std::pair, + split_list::details::wrap_map_traits + > base_class; + //@endcond + + public: + typedef typename base_class::gc gc ; ///< Garbage collector + typedef Key key_type ; ///< key type + typedef Value mapped_type ; ///< type of value stored in the map + typedef Traits options ; ///< \p Traits template argument + + typedef std::pair value_type ; ///< key-value pair type + typedef typename base_class::ordered_list ordered_list; ///< Underlying ordered list class + typedef typename base_class::key_comparator key_comparator ; ///< key compare functor + + typedef typename base_class::hash hash ; ///< Hash functor for \ref key_type + typedef typename base_class::item_counter item_counter ; ///< Item counter type + + protected: + //@cond + typedef typename base_class::maker::type_traits::key_accessor key_accessor; + typedef typename base_class::node_type node_type; + + public: + /// Guarded pointer + typedef cds::gc::guarded_ptr< gc, node_type, value_type, details::guarded_ptr_cast_set > guarded_ptr; + + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + template + class ensure_functor_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + ensure_functor_wrapper() {} + ensure_functor_wrapper( Func f ): base_class(f) {} + + template + void operator()( bool bNew, value_type& item, const Q& /*val*/ ) + { + base_class::get()( bNew, item ); + } + }; + + template + class find_functor_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + find_functor_wrapper() {} + find_functor_wrapper( Func f ): base_class(f) {} + + template + void operator()( value_type& pair, Q const& /*val*/ ) + { + base_class::get()( pair ); + } + }; +# endif // ifndef CDS_CXX11_LAMBDA_SUPPORT + //@endcond + + public: + /// Forward iterator (see SplitListSet::iterator) + /** + Remember, the iterator operator -> and operator * returns \ref value_type pointer and reference. + To access item key and value use it->first and it->second respectively. + */ + typedef typename base_class::iterator iterator; + + /// Const forward iterator (see SplitListSet::const_iterator) + typedef typename base_class::const_iterator const_iterator; + + /// Returns a forward iterator addressing the first element in a map + /** + For empty map \code begin() == end() \endcode + */ + iterator begin() + { + return base_class::begin(); + } + + /// Returns an iterator that addresses the location succeeding the last element in a map + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the map. 
+ For empty map \code begin() == end() \endcode + */ + iterator end() + { + return base_class::end(); + } + + /// Returns a forward const iterator addressing the first element in a map + //@{ + const_iterator begin() const + { + return base_class::begin(); + } + const_iterator cbegin() + { + return base_class::cbegin(); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a map + //@{ + const_iterator end() const + { + return base_class::end(); + } + const_iterator cend() + { + return base_class::cend(); + } + //@} + + public: + /// Initializes split-ordered map of default capacity + /** + The default capacity is defined in bucket table constructor. + See intrusive::split_list::expandable_bucket_table, intrusive::split_list::static_bucket_table + which selects by intrusive::split_list::dynamic_bucket_table option. + */ + SplitListMap() + : base_class() + {} + + /// Initializes split-ordered map + SplitListMap( + size_t nItemCount ///< estimate average item count + , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 10, default is 1. + ) + : base_class( nItemCount, nLoadFactor ) + {} + + public: + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( K const& key ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::insert( std::make_pair( key, mapped_type() ) ); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. + + Returns \p true if \p val is inserted into the map, \p false otherwise. + */ + template + bool insert( K const& key, V const& val ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::insert( std::make_pair(key, val) ); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + It should be keep in mind that concurrent modifications of \p item.second may be possible. + User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this \p item by concurrent threads. + + The user-defined functor can be passed by reference using boost::ref + and it is called only if inserting is successful. + + The key_type should be constructible from value of type \p K. 
+ + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + */ + template + bool insert_key( K const& key, Func func ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::insert( std::make_pair( key, mapped_type() ), func ); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// For key \p key inserts data of type \ref mapped_type constructed with std::forward(args)... + /** + \p key_type should be constructible from type \p K + + Returns \p true if inserting successful, \p false otherwise. + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( K&& key, Args&&... args ) + { + return base_class::emplace( std::forward(key), std::move(mapped_type(std::forward(args)...))); + } +# endif + + + /// Ensures that the \p key exists in the map + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p key not found in the map, then the new item created from \p key + is inserted into the map (note that in this case the \ref key_type should be + constructible from type \p K). + Otherwise, the functor \p func is called with item found. + The functor \p Func may be a function with signature: + \code + void func( bool bNew, value_type& item ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + + The functor may change any fields of the \p item.second that is \ref mapped_type; + however, \p func must guarantee that during changing no any other modifications + could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair ensure( K const& key, Func func ) + { + //TODO: pass arguments by reference (make_pair makes copy) +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::ensure( std::make_pair( key, mapped_type() ), + [&func](bool bNew, value_type& item, value_type const& /*val*/) { + cds::unref(func)( bNew, item ); + } ); +# else + ensure_functor_wrapper fw( func ); + return base_class::ensure( std::make_pair( key, mapped_type() ), cds::ref(fw) ); +# endif + } + + /// Deletes \p key from the map + /** \anchor cds_nonintrusive_SplitListMap_erase_val + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. 
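+
+        For example (illustrative only; \p less_by_key is a hypothetical predicate over the key type
+        that orders keys exactly as the map comparator does):
+        \code
+        struct less_by_key {
+            bool operator()( int k1, int k2 ) const { return k1 < k2; }
+        };
+
+        int_string_map theMap;
+        // ...
+        theMap.erase_with( 5, less_by_key() );   // deletes the item with key 5, if any
+        \endcode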
+ */ + template + bool erase_with( K const& key, Less pred ) + { + return base_class::erase_with( key, cds::details::predicate_wrapper() ); + } + + /// Deletes \p key from the map + /** \anchor cds_nonintrusive_SplitListMap_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface is: + \code + struct extractor { + void operator()(value_type& item) { ... } + }; + \endcode + The functor may be passed by reference using boost:ref + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + return base_class::erase( key, f ); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + return base_class::erase_with( key, cds::details::predicate_wrapper(), f ); + } + + /// Extracts the item with specified \p key + /** \anchor cds_nonintrusive_SplitListMap_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the map, and returns it in \p dest parameter. + If the item with key equal to \p key is not found the function returns \p false. + + Note the compare functor should accept a parameter of type \p K that may be not the same as \p value_type. + + The extracted item is freed automatically when returned \ref guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::SplitListMap< your_template_args > splitlist_map; + splitlist_map theMap; + // ... + { + splitlist_map::guarded_ptr gp; + theMap.extract( gp, 5 ); + // Deal with gp + // ... + + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + bool extract( guarded_ptr& dest, K const& key ) + { + return base_class::extract_( dest.guard(), key ); + } + + /// Extracts the item using compare functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_hp_extract "extract(guarded_ptr&, K const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + bool extract_with( guarded_ptr& dest, K const& key, Less pred ) + { + return base_class::extract_with_( dest.guard(), key, cds::details::predicate_wrapper() ); + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_SplitListMap_find_cfunc + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change \p item.second. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the map's \p item. 
If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( key, [&f](value_type& pair, K const&){ cds::unref(f)( pair ); } ); +# else + find_functor_wrapper fw(f); + return base_class::find( key, cds::ref(fw) ); +# endif + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_find_cfunc "find(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( key, + cds::details::predicate_wrapper(), + [&f](value_type& pair, K const&){ cds::unref(f)( pair ); } ); +# else + find_functor_wrapper fw(f); + return base_class::find_with( key, cds::details::predicate_wrapper(), cds::ref(fw) ); +# endif + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_SplitListMap_find_val + + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool find( K const& key ) + { + return base_class::find( key ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_find_val "find(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred ) + { + return base_class::find( key, cds::details::predicate_wrapper() ); + } + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_SplitListMap_hp_get + The function searches the item with key equal to \p key + and assigns the item found to guarded pointer \p ptr. + The function returns \p true if \p key is found, and \p false otherwise. + If \p key is not found the \p ptr parameter is not changed. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::SplitListMap< your_template_params > splitlist_map; + splitlist_map theMap; + // ... + { + splitlist_map::guarded_ptr gp; + if ( theMap.get( gp, 5 )) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for split-list map + should accept a parameter of type \p K that can be not the same as \p value_type. + */ + template + bool get( guarded_ptr& ptr, K const& key ) + { + return base_class::get_( ptr.guard(), key ); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_hp_get "get( guarded_ptr&, K const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the map. 
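+            Usage is similar to \p get(); a sketch (the \p str_map type and \p less_cstr predicate
+            from the \p erase_with() example above are assumed):
+            \code
+            str_map theMap;
+            // ...
+            {
+                str_map::guarded_ptr gp;
+                if ( theMap.get_with( gp, "foo", less_cstr() )) {
+                    // gp->first is the key, gp->second is the mapped value;
+                    // the item is protected by a hazard pointer while gp is alive
+                }
+                // the guard is released when gp goes out of scope
+            }
+            \endcode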
+ */ + template + bool get_with( guarded_ptr& ptr, K const& key, Less pred ) + { + return base_class::get_with_( ptr.guard(), key, cds::details::predicate_wrapper() ); + } + + /// Clears the map (non-atomic) + /** + The function unlink all items from the map. + The function is not atomic and not lock-free and should be used for debugging only. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + Thus, the correct item counting is an important part of the map implementation. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + }; + + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_SPLIT_LIST_MAP_H diff --git a/cds/container/split_list_map_nogc.h b/cds/container/split_list_map_nogc.h new file mode 100644 index 00000000..af963c6a --- /dev/null +++ b/cds/container/split_list_map_nogc.h @@ -0,0 +1,289 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SPLIT_LIST_MAP_NOGC_H +#define __CDS_CONTAINER_SPLIT_LIST_MAP_NOGC_H + +#include +#include + +namespace cds { namespace container { + + /// Split-ordered list map (template specialization for gc::nogc) + /** @ingroup cds_nonintrusive_map + \anchor cds_nonintrusive_SplitListMap_nogc + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of list item. + + See \ref cds_nonintrusive_SplitListMap_hp "SplitListMap" for description of template parameters. + + The interface of the specialization is a slightly different. + */ + template < + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = split_list::type_traits +#else + class Traits +#endif + > + class SplitListMap: + protected container::SplitListSet< + cds::gc::nogc, + std::pair, + split_list::details::wrap_map_traits + > + { + //@cond + typedef container::SplitListSet< + cds::gc::nogc, + std::pair, + split_list::details::wrap_map_traits + > base_class; + //@endcond + public: + typedef typename base_class::gc gc ; ///< Garbage collector + typedef Key key_type ; ///< key type + typedef Value mapped_type ; ///< type of value stored in the map + + typedef std::pair value_type ; ///< Pair type + typedef typename base_class::ordered_list ordered_list; ///< Underlying ordered list class + typedef typename base_class::key_comparator key_comparator ; ///< key comparison functor + + typedef typename base_class::hash hash ; ///< Hash functor for \ref key_type + typedef typename base_class::item_counter item_counter ; ///< Item counter type + + protected: + //@cond + typedef typename base_class::options::type_traits::key_accessor key_accessor; + //@endcond + + public: + /// Forward iterator (see SplitListSet::iterator) + /** + Remember, the iterator operator -> and operator * returns \ref value_type pointer and reference. + To access item key and value use it->first and it->second respectively. 
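+            For example (a sketch; \p map_nogc is an assumed typedef for a SplitListMap< cds::gc::nogc, int, std::string >
+            and \p theMap is an instance of it):
+            \code
+            for ( map_nogc::iterator it = theMap.begin(), itEnd = theMap.end(); it != itEnd; ++it ) {
+                int const& key = it->first;      // the key cannot be changed
+                std::string& val = it->second;   // the value may be modified in place
+            }
+            \endcode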
+ */ + typedef typename base_class::iterator iterator; + + /// Const forward iterator (see SplitListSet::const_iterator) + typedef typename base_class::const_iterator const_iterator; + + /// Returns a forward iterator addressing the first element in a map + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return base_class::begin(); + } + + /// Returns an iterator that addresses the location succeeding the last element in a map + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. + For empty set \code begin() == end() \endcode + */ + iterator end() + { + return base_class::end(); + } + + /// Returns a forward const iterator addressing the first element in a map + //@{ + const_iterator begin() const + { + return base_class::begin(); + } + const_iterator cbegin() + { + return base_class::cbegin(); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a map + //@{ + const_iterator end() const + { + return base_class::end(); + } + const_iterator cend() + { + return base_class::cend(); + } + //@} + + public: + /// Initialize split-ordered map of default capacity + /** + The default capacity is defined in bucket table constructor. + See intrusive::split_list::expandable_bucket_table, intrusive::split_list::static_ducket_table + which selects by intrusive::split_list::dynamic_bucket_table option. + */ + SplitListMap() + : base_class() + {} + + /// Initialize split-ordered map + SplitListMap( + size_t nItemCount ///< estimate average item count + , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 10, default is 1. + ) + : base_class( nItemCount, nLoadFactor ) + {} + + public: + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( K const& key ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::insert( std::make_pair( key, mapped_type() ) ); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( K const& key, V const& val ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::insert( std::make_pair( key, val ) ); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted. item.second is a reference to item's value that may be changed. 
+ User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this map's item by concurrent threads. + The user-defined functor can be passed by reference using boost::ref + and it is called only if the inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p f functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert_key( const K& key, Func func ) + { + iterator it = insert( key ); + if ( it != end() ) + cds::unref( func )( (*it) ); + return it; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// For key \p key inserts data of type \ref mapped_type constructed with std::forward(args)... + /** + \p key_type should be constructible from type \p K + + Returns \p true if inserting successful, \p false otherwise. + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + iterator emplace( K&& key, Args&&... args ) + { + return base_class::emplace( std::forward(key), std::move(mapped_type(std::forward(args)...))); + } +# endif + + /// Ensures that the key \p key exists in the map + /** + The operation inserts new item if the key \p key is not found in the map. + Otherwise, the function returns an iterator that points to item found. + + Returns std::pair where \p first is an iterator pointing to + item found or inserted, \p second is true if new item has been added or \p false if the item + already is in the list. + */ + template + std::pair ensure( K const& key ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::ensure( std::make_pair( key, mapped_type() )); + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_SplitListMap_nogc_find + + The function searches the item with key equal to \p key + and returns an iterator pointed to item found if the key is found, + and \ref end() otherwise + */ + template + iterator find( K const& key ) + { + return base_class::find( key ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_nogc_find "find(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + iterator find_with( K const& key, Less pred ) + { + return base_class::find_with( key, cds::details::predicate_wrapper() ); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + Thus, the correct item counting feature is an important part of Michael's map implementation. 
+ */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + }; +}} // namespace cds::container + + +#endif // #ifndef __CDS_CONTAINER_SPLIT_LIST_MAP_NOGC_H diff --git a/cds/container/split_list_map_rcu.h b/cds/container/split_list_map_rcu.h new file mode 100644 index 00000000..e7544773 --- /dev/null +++ b/cds/container/split_list_map_rcu.h @@ -0,0 +1,725 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SPLIT_LIST_MAP_RCU_H +#define __CDS_CONTAINER_SPLIT_LIST_MAP_RCU_H + +#include +#include + +namespace cds { namespace container { + + /// Split-ordered list map (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_nonintrusive_map + \anchor cds_nonintrusive_SplitListMap_rcu + + Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see + - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables" + - [2008] Nir Shavit "The Art of Multiprocessor Programming" + + See intrusive::SplitListSet for a brief description of the split-list algorithm. + + Template parameters: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p Key - key type of an item stored in the map. It should be copy-constructible + - \p Value - value type stored in the map + - \p Traits - type traits, default is split_list::type_traits. Instead of declaring split_list::type_traits -based + struct you may apply option-based notation with split_list::make_traits metafunction. + + Iterators + + The class supports a forward iterator (\ref iterator and \ref const_iterator). + The iteration is unordered. + + You may iterate over split-list map items only under RCU lock. + Only in this case the iterator is thread-safe since + while RCU is locked any map's item cannot be reclaimed. + + The requirement of RCU lock during iterating means that deletion of the elements (i.e. \ref erase) + is not possible. + + @warning The iterator object cannot be passed between threads + + \warning Due to concurrent nature of split-list map it is not guarantee that you can iterate + all elements in the map: any concurrent deletion can exclude the element + pointed by the iterator from the map, and your iteration can be terminated + before end of the map. Therefore, such iteration is more suitable for debugging purposes + + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced. + + \par Usage + + You should decide what garbage collector you want, and what ordered list you want to use. Split-ordered list + is original data structure based on an ordered list. Suppose, you want construct split-list map based on cds::urcu::general_buffered<> GC + and MichaelList as ordered list implementation. Your map should map \p int key to std::string value. 
+ So, you beginning your program with following include: + \code + #include + #include + #include + + namespace cc = cds::container; + \endcode + The inclusion order is important: + - first, include one of \ref cds_urcu_gc "RCU implementation" (cds/urcu/general_buffered.h in our case) + - second, include file for ordered-list implementation (for this example, cds/container/michael_list_rcu.h), + - then, the header for RCU-based split-list map cds/container/split_list_map_rcu.h. + + Now, you should declare traits for split-list map. The main parts of traits are a hash functor for the map key and a comparing functor for ordered list. + We use std::hash as hash functor and std::less predicate as comparing functor. + + The second attention: instead of using %MichaelList in %SplitListMap traits we use a tag cds::contaner::michael_list_tag + for the Michael's list. + The split-list requires significant support from underlying ordered list class and it is not good idea to dive you + into deep implementation details of split-list and ordered list interrelations. The tag paradigm simplifies split-list interface. + + \code + // SplitListMap traits + struct foo_set_traits: public cc::split_list::type_traits + { + typedef cc::michael_list_tag ordered_list ; // what type of ordered list we want to use + typedef std::hash hash ; // hash functor for the key stored in split-list map + + // Type traits for our MichaelList class + struct ordered_list_traits: public cc::michael_list::type_traits + { + typedef std::less less ; // use our std::less predicate as comparator to order list nodes + }; + }; + \endcode + + Now you are ready to declare our map class based on \p %SplitListMap: + \code + typedef cc::SplitListMap< cds::urcu::gc >, int, std::string, foo_set_traits > int_string_map; + \endcode + + You may use the modern option-based declaration instead of classic type-traits-based one: + \code + typedef cc:SplitListMap< + cds::urcu::gc > // RCU type + ,int // key type + ,std::string // value type + ,cc::split_list::make_traits< // metafunction to build split-list traits + cc::split_list::ordered_list // tag for underlying ordered list implementation + ,cc::opt::hash< std::hash > // hash functor + ,cc::split_list::ordered_list_traits< // ordered list traits desired + cc::michael_list::make_traits< // metafunction to build lazy list traits + cc::opt::less< std::less > // less-based compare functor + >::type + > + >::type + > int_string_map; + \endcode + In case of option-based declaration using split_list::make_traits metafunction the struct \p foo_set_traits is not required. + + Now, the map of type \p int_string_map is ready to use in your program. + + Note that in this example we show only mandatory type_traits parts, optional ones is the default and they are inherited + from cds::container::split_list::type_traits. + The cds library contains many other options for deep tuning of behavior of the split-list and + ordered-list containers. 
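+        A minimal usage sketch for the \p int_string_map type declared above:
+        \code
+        int_string_map theMap;
+
+        theMap.insert( 10, std::string("ten") );   // insert() takes the RCU lock internally
+
+        {
+            // iteration over the map is allowed only under RCU lock
+            int_string_map::rcu_lock lock;
+            for ( int_string_map::iterator it = theMap.begin(), itEnd = theMap.end(); it != itEnd; ++it ) {
+                // it->first is the key, it->second is the value
+            }
+        }
+        \endcode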
+ */ + template < + class RCU, + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = split_list::type_traits +#else + class Traits +#endif + > + class SplitListMap< cds::urcu::gc< RCU >, Key, Value, Traits >: + protected container::SplitListSet< + cds::urcu::gc< RCU >, + std::pair, + split_list::details::wrap_map_traits + > + { + //@cond + typedef container::SplitListSet< + cds::urcu::gc< RCU >, + std::pair, + split_list::details::wrap_map_traits + > base_class; + //@endcond + + public: + typedef typename base_class::gc gc ; ///< Garbage collector + typedef Traits options ; ///< ]p Traits template argument + typedef Key key_type ; ///< key type + typedef Value mapped_type ; ///< type of value stored in the map + + typedef std::pair value_type ; ///< key-value pair type + typedef typename base_class::ordered_list ordered_list; ///< Underlying ordered list class + typedef typename base_class::key_comparator key_comparator ; ///< key comparison functor + + typedef typename base_class::hash hash ; ///< Hash functor for \ref key_type + typedef typename base_class::item_counter item_counter ; ///< Item counter type + + typedef typename base_class::rcu_lock rcu_lock ; ///< RCU scoped lock + typedef typename base_class::exempt_ptr exempt_ptr ; ///< pointer to extracted node + /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that + static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; + + protected: + //@cond + typedef typename base_class::maker::type_traits::key_accessor key_accessor; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + template + class ensure_functor_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + ensure_functor_wrapper() {} + ensure_functor_wrapper( Func f ): base_class(f) {} + + template + void operator()( bool bNew, value_type& item, const Q& /*val*/ ) + { + base_class::get()( bNew, item ); + } + }; + + template + class find_functor_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + find_functor_wrapper() {} + find_functor_wrapper( Func f ): base_class(f) {} + + template + void operator()( value_type& pair, Q const& /*val*/ ) + { + base_class::get()( pair ); + } + }; +# endif // ifndef CDS_CXX11_LAMBDA_SUPPORT + //@endcond + + public: + /// Forward iterator + typedef typename base_class::iterator iterator; + + /// Const forward iterator + typedef typename base_class::const_iterator const_iterator; + + /// Returns a forward iterator addressing the first element in a map + /** + For empty map \code begin() == end() \endcode + */ + iterator begin() + { + return base_class::begin(); + } + + /// Returns an iterator that addresses the location succeeding the last element in a map + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the map. 
+ For empty map \code begin() == end() \endcode + */ + iterator end() + { + return base_class::end(); + } + + /// Returns a forward const iterator addressing the first element in a map + //@{ + const_iterator begin() const + { + return base_class::begin(); + } + const_iterator cbegin() + { + return base_class::cbegin(); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a map + //@{ + const_iterator end() const + { + return base_class::end(); + } + const_iterator cend() + { + return base_class::cend(); + } + //@} + + public: + /// Initializes split-ordered map of default capacity + /** + The default capacity is defined in bucket table constructor. + See intrusive::split_list::expandable_bucket_table, intrusive::split_list::static_bucket_table + which selects by intrusive::split_list::dynamic_bucket_table option. + */ + SplitListMap() + : base_class() + {} + + /// Initializes split-ordered map + SplitListMap( + size_t nItemCount ///< estimate average item count + , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 10, default is 1. + ) + : base_class( nItemCount, nLoadFactor ) + {} + + public: + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + The function applies RCU lock internally. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( K const& key ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::insert( std::make_pair( key, mapped_type() ) ); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. + + The function applies RCU lock internally. + + Returns \p true if \p val is inserted into the map, \p false otherwise. + */ + template + bool insert( K const& key, V const& val ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::insert( std::make_pair(key, val) ); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + It should be keep in mind that concurrent modifications of \p item.second may be possible. + User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this \p item by concurrent threads. + + The user-defined functor can be passed by reference using boost::ref + and it is called only if inserting is successful. + + The key_type should be constructible from value of type \p K. 
+ + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + The function applies RCU lock internally. + */ + template + bool insert_key( K const& key, Func func ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::insert( std::make_pair( key, mapped_type() ), func ); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// For key \p key inserts data of type \ref mapped_type constructed with std::forward(args)... + /** + \p key_type should be constructible from type \p K + + The function applies RCU lock internally. + + Returns \p true if inserting successful, \p false otherwise. + + @note This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( K&& key, Args&&... args ) + { + return base_class::emplace( std::forward(key), std::move(mapped_type(std::forward(args)...))); + } +# endif + + /// Ensures that the \p key exists in the map + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p key not found in the map, then the new item created from \p key + is inserted into the map (note that in this case the \ref key_type should be + constructible from type \p K). + Otherwise, the functor \p func is called with item found. + The functor \p Func may be a function with signature: + \code + void func( bool bNew, value_type& item ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + + The functor may change any fields of the \p item.second that is \ref mapped_type; + however, \p func must guarantee that during changing no any other modifications + could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + The function applies RCU lock internally. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair ensure( K const& key, Func func ) + { + //TODO: pass arguments by reference (make_pair makes copy) +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::ensure( std::make_pair( key, mapped_type() ), + [&func](bool bNew, value_type& item, value_type const& /*val*/) { + cds::unref(func)( bNew, item ); + } ); +# else + ensure_functor_wrapper fw( func ); + return base_class::ensure( std::make_pair( key, mapped_type() ), cds::ref(fw) ); +# endif + } + + /// Deletes \p key from the map + /** \anchor cds_nonintrusive_SplitListMap_rcu_erase_val + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_rcu_erase_val "erase(K const&)" + but \p pred is used for key comparing. 
+ \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred ) + { + return base_class::erase_with( key, cds::details::predicate_wrapper() ); + } + + /// Deletes \p key from the map + /** \anchor cds_nonintrusive_SplitListMap_rcu_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface is: + \code + struct extractor { + void operator()(value_type& item) { ... } + }; + \endcode + The functor may be passed by reference using boost:ref + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + return base_class::erase( key, f ); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_rcu_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + return base_class::erase_with( key, cds::details::predicate_wrapper(), f ); + } + + /// Extracts an item from the map + /** \anchor cds_nonintrusive_SplitListMap_rcu_extract + The function searches an item with key equal to \p key in the map, + unlinks it from the map, places item pointer into \p dest argument, and returns \p true. + If the item with the key equal to \p key is not found the function return \p false. + + @note The function does NOT call RCU read-side lock or synchronization, + and does NOT dispose the item found. It just excludes the item from the map + and returns a pointer to item found. + You should lock RCU before calling of the function, and you should synchronize RCU + outside the RCU lock to free extracted item + + \code + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::container::SplitListMap< rcu, int, Foo > splitlist_map; + + splitlist_map theMap; + // ... + + typename splitlist_map::exempt_ptr p; + { + // first, we should lock RCU + typename splitlist_map::rcu_lock lock; + + // Now, you can apply extract function + // Note that you must not delete the item found inside the RCU lock + if ( theMap.extract( p, 10 )) { + // do something with p + ... + } + } + + // We may safely release p here + // release() passes the pointer to RCU reclamation cycle + p.release(); + \endcode + */ + template + bool extract( exempt_ptr& dest, K const& key ) + { + return base_class::extract( dest, key ); + } + + /// Extracts an item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_rcu_extract "extract(exempt_ptr&, K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the map. 
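+            A sketch of usage (illustrative; \p str_rcu_map is an assumed RCU-based SplitListMap keyed by std::string,
+            \p less_cstr an assumed predicate comparing std::string and char const* in the same order as the map comparator):
+            \code
+            str_rcu_map theMap;
+            // ...
+
+            str_rcu_map::exempt_ptr p;
+            {
+                // lock RCU before extracting
+                str_rcu_map::rcu_lock lock;
+                if ( theMap.extract_with( p, "foo", less_cstr() )) {
+                    // use *p here; do not free the item inside the RCU lock
+                }
+            }
+            p.release();   // outside the lock: pass the extracted item to RCU reclamation
+            \endcode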
+ */ + template + bool extract_with( exempt_ptr& dest, K const& key, Less pred ) + { + return base_class::extract_with( dest, key, cds::details::predicate_wrapper()); + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_SplitListMap_rcu_find_cfunc + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change \p item.second. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the map's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The function applies RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( key, [&f](value_type& pair, K const&){ cds::unref(f)( pair ); } ); +# else + find_functor_wrapper fw(f); + return base_class::find( key, cds::ref(fw) ); +# endif + } + + /// Finds the key \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_rcu_find_cfunc "find(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( key, + cds::details::predicate_wrapper(), + [&f](value_type& pair, K const&){ cds::unref(f)( pair ); } ); +# else + find_functor_wrapper fw(f); + return base_class::find_with( key, cds::details::predicate_wrapper(), cds::ref(fw) ); +# endif + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_SplitListMap_rcu_find_val + + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool find( K const& key ) + { + return base_class::find( key ); + } + + /// Finds the key \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_rcu_find_val "find(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred ) + { + return base_class::find_with( key, cds::details::predicate_wrapper() ); + } + + /// Finds \p key and return the item found + /** \anchor cds_intrusive_SplitListMap_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns \p NULL. + + Note the compare functor should accept a parameter of type \p K that can be not the same as \p value_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::container::SplitListMap< rcu, int, Foo > splitlist_map; + splitlist_map theMap; + // ... 
+ { + // Lock RCU + typename splitlist_map::rcu_lock lock; + + typename splitlist_map::value_type * pVal = theMap.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + // Unlock RCU by rcu_lock destructor + // pVal can be retired by disposer at any time after RCU has been unlocked + } + \endcode + */ + template + value_type * get( K const& key ) + { + return base_class::get( key ); + } + + /// Finds \p key with predicate specified and return the item found + /** + The function is an analog of \ref cds_intrusive_SplitListMap_rcu_get "get(K const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + value_type * get_with( K const& key, Less pred ) + { + return base_class::get_with( key, cds::details::predicate_wrapper()); + } + + /// Clears the map (non-atomic) + /** + The function unlink all items from the map. + The function is not atomic and not lock-free and should be used for debugging only. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + Thus, the correct item counting is an important part of the map implementation. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + }; + + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_SPLIT_LIST_MAP_RCU_H diff --git a/cds/container/split_list_set.h b/cds/container/split_list_set.h new file mode 100644 index 00000000..ffdb9a57 --- /dev/null +++ b/cds/container/split_list_set.h @@ -0,0 +1,892 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SPLIT_LIST_SET_H +#define __CDS_CONTAINER_SPLIT_LIST_SET_H + +#include +#include +#include + +namespace cds { namespace container { + + /// Split-ordered list set + /** @ingroup cds_nonintrusive_set + \anchor cds_nonintrusive_SplitListSet_hp + + Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see + - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables" + - [2008] Nir Shavit "The Art of Multiprocessor Programming" + + See intrusive::SplitListSet for a brief description of the split-list algorithm. + + Template parameters: + - \p GC - Garbage collector used + - \p T - type stored in the split-list. The type must be default- and copy-constructible. + - \p Traits - type traits, default is split_list::type_traits. Instead of declaring split_list::type_traits -based + struct you may apply option-based notation with split_list::make_traits metafunction. + + There are the specializations: + - for \ref cds_urcu_desc "RCU" - declared in cd/container/split_list_set_rcu.h, + see \ref cds_nonintrusive_SplitListSet_rcu "SplitListSet". + - for \ref cds::gc::nogc declared in cds/container/split_list_set_nogc.h, + see \ref cds_nonintrusive_SplitListSet_nogc "SplitListSet". + + \par Usage + + You should decide what garbage collector you want, and what ordered list you want to use. Split-ordered list + is original data structure based on an ordered list. Suppose, you want construct split-list set based on gc::PTB GC + and LazyList as ordered list implementation. 
So, you beginning your program with following include: + \code + #include + #include + + namespace cc = cds::container; + + // The data belonged to split-ordered list + sturuct foo { + int nKey; // key field + std::string strValue ; // value field + }; + \endcode + The inclusion order is important: first, include header for ordered-list implementation (for this example, cds/container/lazy_list_ptb.h), + then the header for split-list set cds/container/split_list_set.h. + + Now, you should declare traits for split-list set. The main parts of traits are a hash functor for the set and a comparing functor for ordered list. + Note that we define several function in foo_hash and foo_less functors for different argument types since we want call our \p %SplitListSet + object by the key of type int and by the value of type foo. + + The second attention: instead of using \p %LazyList in \p %SplitListSet traits we use a tag cds::contaner::lazy_list_tag for the lazy list. + The split-list requires significant support from underlying ordered list class and it is not good idea to dive you + into deep implementation details of split-list and ordered list interrelations. The tag paradigm simplifies split-list interface. + + \code + // foo hash functor + struct foo_hash { + size_t operator()( int key ) const { return std::hash( key ) ; } + size_t operator()( foo const& item ) const { return std::hash( item.nKey ) ; } + }; + + // foo comparator + struct foo_less { + bool operator()(int i, foo const& f ) const { return i < f.nKey ; } + bool operator()(foo const& f, int i ) const { return f.nKey < i ; } + bool operator()(foo const& f1, foo const& f2) const { return f1.nKey < f2.nKey; } + }; + + // SplitListSet traits + struct foo_set_traits: public cc::split_list::type_traits + { + typedef cc::lazy_list_tag ordered_list ; // what type of ordered list we want to use + typedef foo_hash hash ; // hash functor for our data stored in split-list set + + // Type traits for our LazyList class + struct ordered_list_traits: public cc::lazy_list::type_traits + { + typedef foo_less less ; // use our foo_less as comparator to order list nodes + }; + }; + \endcode + + Now you are ready to declare our set class based on \p %SplitListSet: + \code + typedef cc::SplitListSet< cds::gc::PTB, foo, foo_set_traits > foo_set; + \endcode + + You may use the modern option-based declaration instead of classic type-traits-based one: + \code + typedef cc:SplitListSet< + cs::gc::PTB // GC used + ,foo // type of data stored + ,cc::split_list::make_traits< // metafunction to build split-list traits + cc::split_list::ordered_list // tag for underlying ordered list implementation + ,cc::opt::hash< foo_hash > // hash functor + ,cc::split_list::ordered_list_traits< // ordered list traits desired + cc::lazy_list::make_traits< // metafunction to build lazy list traits + cc::opt::less< foo_less > // less-based compare functor + >::type + > + >::type + > foo_set; + \endcode + In case of option-based declaration using split_list::make_traits metafunction + the struct \p foo_set_traits is not required. + + Now, the set of type \p foo_set is ready to use in your program. + + Note that in this example we show only mandatory type_traits parts, optional ones is the default and they are inherited + from cds::container::split_list::type_traits. + The cds library contains many other options for deep tuning of behavior of the split-list and + ordered-list containers. 
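+        A minimal usage sketch for the \p foo_set type declared above:
+        \code
+        foo_set theSet;
+
+        foo item;
+        item.nKey = 10;
+        item.strValue = "ten";
+
+        theSet.insert( item );   // insert a copy of item
+        theSet.erase( 10 );      // erase by key of type int: foo_less can compare foo and int
+        \endcode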
+ */ + template < + class GC, + class T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = split_list::type_traits +#else + class Traits +#endif + > + class SplitListSet: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::SplitListSet +#else + protected details::make_split_list_set< GC, T, typename Traits::ordered_list, split_list::details::wrap_set_traits >::type +#endif + { + protected: + //@cond + typedef details::make_split_list_set< GC, T, typename Traits::ordered_list, split_list::details::wrap_set_traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef Traits options ; ///< \p Traits template argument + typedef typename maker::gc gc ; ///< Garbage collector + typedef typename maker::value_type value_type ; ///< type of value stored in the list + typedef typename maker::ordered_list ordered_list ; ///< Underlying ordered list class + typedef typename base_class::key_comparator key_comparator; ///< key compare functor + + /// Hash functor for \p %value_type and all its derivatives that you use + typedef typename base_class::hash hash; + typedef typename base_class::item_counter item_counter ; ///< Item counter type + + protected: + //@cond + typedef typename maker::cxx_node_allocator cxx_node_allocator; + typedef typename maker::node_type node_type; + //@endcond + + public: + /// Guarded pointer + typedef cds::gc::guarded_ptr< gc, node_type, value_type, details::guarded_ptr_cast_set > guarded_ptr; + + protected: + //@cond + template + static node_type * alloc_node(Q const& v ) + { + return cxx_node_allocator().New( v ); + } + + template + bool find_( Q& val, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( val, [&f]( node_type& item, Q& val ) { cds::unref(f)(item.m_Value, val) ; } ); +# else + find_functor_wrapper fw(f); + return base_class::find( val, cds::ref(fw) ); +# endif + } + + template + bool find_with_( Q& val, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( val, typename maker::template predicate_wrapper::type(), + [&f]( node_type& item, Q& val ) { cds::unref(f)(item.m_Value, val) ; } ); +# else + find_functor_wrapper fw(f); + return base_class::find_with( val, typename maker::template predicate_wrapper::type(), cds::ref(fw) ); +# endif + } + +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node( Args&&... 
args ) + { + return cxx_node_allocator().MoveNew( std::forward(args)...); + } +# endif + + static void free_node( node_type * pNode ) + { + cxx_node_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + bool insert_node( node_type * pNode ) + { + assert( pNode != null_ptr() ); + scoped_node_ptr p(pNode); + + if ( base_class::insert( *pNode ) ) { + p.release(); + return true; + } + + return false; + } + + //@endcond + + protected: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + template + class insert_functor_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + insert_functor_wrapper( Func f ): base_class(f) {} + + void operator()(node_type& node) + { + base_class::get()( node.m_Value ); + } + }; + + template + class ensure_functor_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + Q const& m_val; + public: + ensure_functor_wrapper( Func f, Q const& v ): base_class(f), m_val(v) {} + + void operator()( bool bNew, node_type& item, node_type const& /*val*/ ) + { + base_class::get()( bNew, item.m_Value, m_val ); + } + }; + + template + class find_functor_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + find_functor_wrapper( Func f ): base_class(f) {} + + template + void operator()( node_type& item, Q& val ) + { + base_class::get()( item.m_Value, val ); + } + }; + + template + class erase_functor_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + erase_functor_wrapper( Func f ): base_class( f ) {} + + void operator()(node_type& node) + { + base_class::get()( node.m_Value ); + } + }; +# endif // ifndef CDS_CXX11_LAMBDA_SUPPORT + //@endcond + + protected: + /// Forward iterator + /** + \p IsConst - constness boolean flag + + The forward iterator for a split-list has the following features: + - it has no post-increment operator + - it depends on underlying ordered list iterator + - The iterator object cannot be moved across thread boundary since it contains GC's guard that is thread-private GC data. + - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the split-list. + + Therefore, the use of iterators in concurrent environment is not good idea. Use it for debug purpose only. 
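+            For example (a debugging-style sketch; \p theSet is a \p foo_set instance as in the class-level example):
+            \code
+            for ( foo_set::iterator it = theSet.begin(), itEnd = theSet.end(); it != itEnd; ++it ) {
+                foo& val = *it;   // the iterator dereferences to the stored value
+                // do not pass the iterator to another thread: it owns thread-private HP guards
+            }
+            \endcode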
+ */ + template + class iterator_type: protected base_class::template iterator_type + { + //@cond + typedef typename base_class::template iterator_type iterator_base_class; + friend class SplitListSet; + //@endcond + public: + /// Value pointer type (const for const iterator) + typedef typename cds::details::make_const_type::pointer value_ptr; + /// Value reference type (const for const iterator) + typedef typename cds::details::make_const_type::reference value_ref; + + public: + /// Default ctor + iterator_type() + {} + + /// Copy ctor + iterator_type( iterator_type const& src ) + : iterator_base_class( src ) + {} + + protected: + //@cond + explicit iterator_type( iterator_base_class const& src ) + : iterator_base_class( src ) + {} + //@endcond + + public: + /// Dereference operator + value_ptr operator ->() const + { + return &(iterator_base_class::operator->()->m_Value); + } + + /// Dereference operator + value_ref operator *() const + { + return iterator_base_class::operator*().m_Value; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base_class::operator++(); + return *this; + } + + /// Assignment operator + iterator_type& operator = (iterator_type const& src) + { + iterator_base_class::operator=(src); + return *this; + } + + /// Equality operator + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base_class::operator==(i); + } + + /// Equality operator + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base_class::operator!=(i); + } + }; + + public: + /// Initializes split-ordered list of default capacity + /** + The default capacity is defined in bucket table constructor. + See intrusive::split_list::expandable_bucket_table, intrusive::split_list::static_bucket_table + which selects by intrusive::split_list::dynamic_bucket_table option. + */ + SplitListSet() + : base_class() + {} + + /// Initializes split-ordered list + SplitListSet( + size_t nItemCount ///< estimate average of item count + , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 8, default is 1. + ) + : base_class( nItemCount, nLoadFactor ) + {} + + public: + /// Forward iterator + typedef iterator_type iterator; + + /// Const forward iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( base_class::begin() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. + For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( base_class::end() ); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return const_iterator( base_class::begin() ); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator end() const + { + return const_iterator( base_class::end() ); + } + + public: + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain as minimum the complete key for the node. + The object of \ref value_type should be constructible from a value of type \p Q. 
+ In trivial case, \p Q is equal to \ref value_type. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( Q const& val ) + { + return insert_node( alloc_node( val ) ); + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success. It may be passed by reference + using boost::ref + */ + template + bool insert( Q const& val, Func f ) + { + scoped_node_ptr pNode( alloc_node( val )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( base_class::insert( *pNode, [&f](node_type& node) { cds::unref(f)( node.m_Value ) ; } )) +# else + insert_functor_wrapper fw(f); + if ( base_class::insert( *pNode, cds::ref(fw) ) ) +# endif + { + pNode.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \p %value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + @note This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + return insert_node( alloc_node( std::forward(args)...)); + } +# endif + + /// Ensures that the \p item exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p val key not found in the set, then the new item created from \p val + is inserted into the set. Otherwise, the functor \p func is called with the item found. + The functor \p Func should be a function with signature: + \code + void func( bool bNew, value_type& item, const Q& val ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p ensure function + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the set. 
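+            For example (a sketch based on the \p foo_set and \p foo types from the class-level example):
+            \code
+            // functor: update the value of an existing element, leave a newly inserted one as is
+            struct set_value {
+                void operator()( bool bNew, foo& item, foo const& val )
+                {
+                    if ( !bNew )
+                        item.strValue = val.strValue;
+                }
+            };
+
+            foo item;
+            item.nKey = 10;
+            item.strValue = "ten";
+
+            std::pair<bool, bool> res = theSet.ensure( item, set_value() );
+            // res.first  - the operation succeeded
+            // res.second - true if a new element has been inserted, false if an existing one has been updated
+            \endcode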
+ */ + template + std::pair ensure( Q const& val, Func func ) + { + scoped_node_ptr pNode( alloc_node( val )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair bRet = base_class::ensure( *pNode, + [&func, &val]( bool bNew, node_type& item, node_type const& /*val*/ ) { + cds::unref(func)( bNew, item.m_Value, val ); + } ); +# else + ensure_functor_wrapper fw( func, val ); + std::pair bRet = base_class::ensure( *pNode, cds::ref(fw) ); +# endif + + if ( bRet.first && bRet.second ) + pNode.release(); + return bRet; + } + + /// Deletes \p key from the set + /** \anchor cds_nonintrusive_SplitListSet_erase_val + + The item comparator should be able to compare the values of type \p value_type + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred ) + { + return base_class::erase_with( key, typename maker::template predicate_wrapper::type() ); + } + + /// Deletes \p key from the set + /** \anchor cds_nonintrusive_SplitListSet_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type const& val); + }; + \endcode + The functor may be passed by reference using boost:ref + + Since the key of SplitListSet's \p value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the values of the type \p value_type + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase( key, [&f](node_type& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor_wrapper fw( f ); + return base_class::erase( key, cds::ref(fw) ); +# endif + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase_with( key, typename maker::template predicate_wrapper::type(), + [&f](node_type& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor_wrapper fw( f ); + return base_class::erase_with( key, typename maker::template predicate_wrapper::type(), cds::ref(fw) ); +# endif + } + + /// Extracts the item with specified \p key + /** \anchor cds_nonintrusive_SplitListSet_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the set, and returns it in \p dest parameter. + If the item with key equal to \p key is not found the function returns \p false. 
+ + Note the compare functor should accept a parameter of type \p Q that may be not the same as \p value_type. + + The extracted item is freed automatically when returned \ref guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::SplitListSet< your_template_args > splitlist_set; + splitlist_set theSet; + // ... + { + splitlist_set::guarded_ptr gp; + theSet.extract( gp, 5 ); + // Deal with gp + // ... + + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + bool extract( guarded_ptr& dest, Q const& key ) + { + return extract_( dest.guard(), key ); + } + + /// Extracts the item using compare functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_hp_extract "extract(guarded_ptr&, Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool extract_with( guarded_ptr& dest, Q const& key, Less pred ) + { + return extract_with_( dest.guard(), key, pred ); + } + + /// Finds the key \p val + /** \anchor cds_nonintrusive_SplitListSet_find_func + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return find_( val, f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Less pred, Func f ) + { + return find_with_( val, pred, f ); + } + + /// Finds the key \p val + /** \anchor cds_nonintrusive_SplitListSet_find_cfunc + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. 
Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return find_( val, f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred, Func f ) + { + return find_with_( val, pred, f ); + } + + /// Finds the key \p val + /** \anchor cds_nonintrusive_SplitListSet_find_val + + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \ref value_type. + */ + template + bool find( Q const& val ) + { + return base_class::find( val ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred ) + { + return base_class::find_with( val, typename maker::template predicate_wrapper::type() ); + } + + /// Finds the key \p key and return the item found + /** \anchor cds_nonintrusive_SplitListSet_hp_get + The function searches the item with key equal to \p key + and assigns the item found to guarded pointer \p ptr. + The function returns \p true if \p key is found, and \p false otherwise. + If \p key is not found the \p ptr parameter is not changed. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::SplitListSet< your_template_params > splitlist_set; + splitlist_set theSet; + // ... + { + splitlist_set::guarded_ptr gp; + if ( theSet.get( gp, 5 )) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for split-list set + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool get( guarded_ptr& ptr, Q const& key ) + { + return get_( ptr.guard(), key ); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_hp_get "get( guarded_ptr&, Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. 
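+
+            A sketch of such a call (the \p foo item type, the \p foo_set typedef and the
+            \p less_by_key predicate below are hypothetical):
+            \code
+            struct less_by_key {
+                bool operator()( foo const& item, int key ) const { return item.nKey < key; }
+                bool operator()( int key, foo const& item ) const { return key < item.nKey; }
+            };
+
+            foo_set theSet;
+            // ...
+            {
+                foo_set::guarded_ptr gp;
+                if ( theSet.get_with( gp, 5, less_by_key() )) {
+                    // gp points to the item found; the hazard pointer guard
+                    // keeps it alive until gp is released or destroyed
+                }
+            }
+            \endcode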
+ */ + template + bool get_with( guarded_ptr& ptr, Q const& key, Less pred ) + { + return get_with_( ptr.guard(), key, pred ); + } + + /// Clears the set (non-atomic) + /** + The function unlink all items from the set. + The function is not atomic and not lock-free and should be used for debugging only. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then assume that the set is empty. + Thus, the correct item counting feature is an important part of split-list set implementation. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + size_t size() const + { + return base_class::size(); + } + + protected: + //@cond + using base_class::extract_; + using base_class::get_; + + template + bool extract_with_( typename gc::Guard& guard, Q const& key, Less pred ) + { + return base_class::extract_with_( guard, key, typename maker::template predicate_wrapper::type() ); + } + + template + bool get_with_( typename gc::Guard& guard, Q const& key, Less pred ) + { + return base_class::get_with_( guard, key, typename maker::template predicate_wrapper::type() ); + } + + //@endcond + + }; + + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_SPLIT_LIST_SET_H diff --git a/cds/container/split_list_set_nogc.h b/cds/container/split_list_set_nogc.h new file mode 100644 index 00000000..2dfaa74a --- /dev/null +++ b/cds/container/split_list_set_nogc.h @@ -0,0 +1,360 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SPLIT_LIST_SET_NOGC_H +#define __CDS_CONTAINER_SPLIT_LIST_SET_NOGC_H + +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Split-ordered list set (template specialization for gc::nogc) + /** @ingroup cds_nonintrusive_set + \anchor cds_nonintrusive_SplitListSet_nogc + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of list item. + + See \ref cds_nonintrusive_SplitListSet_hp "SplitListSet" for description of template parameters. + + The interface of the specialization is a slightly different. 
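+
+        For example, a brief sketch of the persistent usage pattern (the \p foo type and the
+        \p foo_set typedef are hypothetical, shown for illustration only):
+        \code
+        // foo_set is assumed to be a SplitListSet< cds::gc::nogc, foo, ... > specialization
+        foo_set theSet;
+
+        // insert() returns an iterator rather than bool
+        foo_set::iterator it = theSet.insert( foo( 5, "five" ));
+        if ( it != theSet.end() ) {
+            // The item stays in the set for the whole container lifetime:
+            // this specialization has no erase()
+        }
+        \endcode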
+ */ + template < + class T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = split_list::type_traits +#else + class Traits +#endif + > + class SplitListSet< cds::gc::nogc, T, Traits> +#ifdef CDS_DOXYGEN_INVOKED + :protected intrusive::SplitListSet +#else + :protected details::make_split_list_set< cds::gc::nogc, T, typename Traits::ordered_list, split_list::details::wrap_set_traits >::type +#endif + { + protected: + //@cond + typedef details::make_split_list_set< cds::gc::nogc, T, typename Traits::ordered_list, split_list::details::wrap_set_traits > options; + typedef typename options::type base_class; + //@endcond + + public: + typedef typename options::gc gc ; ///< Garbage collector + typedef typename options::value_type value_type ; ///< type of value stored in the list + typedef typename options::ordered_list ordered_list ; ///< Underlying ordered list class + typedef typename base_class::key_comparator key_comparator ; ///< key comparison functor + + /// Hash functor for \ref value_type and all its derivatives that you use + typedef typename base_class::hash hash; + typedef typename base_class::item_counter item_counter ; ///< Item counter type + + protected: + //@cond + typedef typename options::cxx_node_allocator cxx_node_allocator; + typedef typename options::node_type node_type; + + template + static node_type * alloc_node(Q const& v ) + { + return cxx_node_allocator().New( v ); + } + +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_node_allocator().MoveNew( std::forward(args)...); + } +# endif + + static void free_node( node_type * pNode ) + { + cxx_node_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + //@endcond + + protected: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_ensure_functor + { + void operator()( bool /*bNew*/, node_type& /*item*/, node_type& /*val*/ ) + {} + }; +# endif + //@endcond + + + public: + /// Initialize split-ordered list of default capacity + /** + The default capacity is defined in bucket table constructor. + See intrusive::split_list::expandable_bucket_table, intrusive::split_list::static_ducket_table + which selects by intrusive::split_list::dynamic_bucket_table option. + */ + SplitListSet() + : base_class() + {} + + /// Initialize split-ordered list + SplitListSet( + size_t nItemCount ///< estimate average of item count + , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 10, default is 1. 
+ ) + : base_class( nItemCount, nLoadFactor ) + {} + + protected: + /// Forward iterator + /** + \p IsConst - constness boolean flag + + The forward iterator has the following features: + - it has no post-increment operator + - it depends on underlying ordered list iterator + */ + template + class iterator_type: protected base_class::template iterator_type + { + //@cond + typedef typename base_class::template iterator_type iterator_base_class; + friend class SplitListSet; + //@endcond + public: + /// Value pointer type (const for const iterator) + typedef typename cds::details::make_const_type::pointer value_ptr; + /// Value reference type (const for const iterator) + typedef typename cds::details::make_const_type::reference value_ref; + + public: + /// Default ctor + iterator_type() + {} + + /// Copy ctor + iterator_type( iterator_type const& src ) + : iterator_base_class( src ) + {} + + protected: + //@cond + explicit iterator_type( iterator_base_class const& src ) + : iterator_base_class( src ) + {} + //@endcond + + public: + /// Dereference operator + value_ptr operator ->() const + { + return &(iterator_base_class::operator->()->m_Value); + } + + /// Dereference operator + value_ref operator *() const + { + return iterator_base_class::operator*().m_Value; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base_class::operator++(); + return *this; + } + + /// Assignment operator + iterator_type& operator = (iterator_type const& src) + { + iterator_base_class::operator=(src); + return *this; + } + + /// Equality operator + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base_class::operator==(i); + } + + /// Equality operator + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base_class::operator!=(i); + } + }; + + public: + /// Forward iterator + typedef iterator_type iterator; + + /// Const forward iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( base_class::begin() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. + For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( base_class::end() ); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return const_iterator( base_class::begin() ); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator end() const + { + return const_iterator( base_class::end() ); + } + + protected: + //@cond + iterator insert_node( node_type * pNode ) + { + assert( pNode != null_ptr() ); + scoped_node_ptr p(pNode); + + iterator it( base_class::insert_( *pNode )); + if ( it != end() ) { + p.release(); + return it; + } + + return end(); + } + //@endcond + + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + The \ref value_type should be constructible from a value of type \p Q. 
+ + Return an iterator pointing to inserted item if success \ref end() otherwise + */ + template + iterator insert( const Q& val ) + { + return insert_node( alloc_node( val ) ); + } + +#ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Return an iterator pointing to inserted item if success \ref end() otherwise + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + iterator emplace( Args&&... args ) + { + return insert_node( alloc_node( std::forward(args)... ) ); + } +#endif + + /// Ensures that the item \p val exists in the set + /** + The operation inserts new item created from \p val if the key \p val is not found in the set. + Otherwise, the function returns an iterator that points to item found. + The \p value_type should be constructible from a value of type \p Q. + + Returns std::pair where \p first is an iterator pointing to + item found or inserted, \p second is true if new item has been added or \p false if the item + already is in the set. + */ + template + std::pair ensure( const Q& val ) + { + scoped_node_ptr pNode( alloc_node( val )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair ret = base_class::ensure_( *pNode, [](bool /*bNew*/, node_type& /*item*/, node_type& /*val*/){} ); +# else + std::pair ret = base_class::ensure_( *pNode, empty_ensure_functor() ); +# endif + if ( ret.first != base_class::end() && ret.second ) { + pNode.release(); + return std::make_pair( iterator(ret.first), ret.second ); + } + + return std::make_pair( iterator(ret.first), ret.second ); + } + + /// Find the key \p val + /** \anchor cds_nonintrusive_SplitListSet_nogc_find + + The function searches the item with key equal to \p key + and returns an iterator pointed to item found if the key is found, + and \ref end() otherwise + */ + template + iterator find( Q const& key ) + { + return iterator( base_class::find_( key )); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_nogc_find "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + iterator find_with( Q const& key, Less pred ) + { + return iterator( base_class::find_with_( key, typename options::template predicate_wrapper::type() )); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, the correct item counting feature is an important part of split-list set implementation. 
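+
+            For example (the \p foo type and the \p foo_set typedef are hypothetical; \p foo_set is
+            assumed to be declared with a real item counter such as \p cds::atomicity::item_counter,
+            since with an empty item counter the checks below would be meaningless):
+            \code
+            foo_set theSet;
+            assert( theSet.empty());
+            assert( theSet.size() == 0 );
+
+            theSet.insert( foo( 5, "five" ));
+            assert( !theSet.empty());
+            assert( theSet.size() == 1 );
+            \endcode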
+ */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + size_t size() const + { + return base_class::size(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_SPLIT_LIST_SET_NOGC_H diff --git a/cds/container/split_list_set_rcu.h b/cds/container/split_list_set_rcu.h new file mode 100644 index 00000000..1080adc5 --- /dev/null +++ b/cds/container/split_list_set_rcu.h @@ -0,0 +1,966 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_SPLIT_LIST_SET_RCU_H +#define __CDS_CONTAINER_SPLIT_LIST_SET_RCU_H + +#include +#include +#include + +namespace cds { namespace container { + + /// Split-ordered list set (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_nonintrusive_set + \anchor cds_nonintrusive_SplitListSet_rcu + + Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see + - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables" + - [2008] Nir Shavit "The Art of Multiprocessor Programming" + + See intrusive::SplitListSet for a brief description of the split-list algorithm. + + Template parameters: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p T - type stored in the split-list. The type must be default- and copy-constructible. + - \p Traits - type traits, default is split_list::type_traits. Instead of declaring split_list::type_traits -based + struct you may apply option-based notation with split_list::make_traits metafunction. + + Iterators + + The class supports a forward iterator (\ref iterator and \ref const_iterator). + The iteration is ordered. + + You may iterate over split-list set items only under RCU lock. + Only in this case the iterator is thread-safe since + while RCU is locked any set's item cannot be reclaimed. + + The requirement of RCU lock during iterating means that deletion of the elements (i.e. \ref erase) + is not possible. + + @warning The iterator object cannot be passed between threads + + \warning Due to concurrent nature of skip-list set it is not guarantee that you can iterate + all elements in the set: any concurrent deletion can exclude the element + pointed by the iterator from the set, and your iteration can be terminated + before end of the set. Therefore, such iteration is more suitable for debugging purposes + + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced. + + \par Usage + + You should decide what garbage collector you want, and what ordered list you want to use. Split-ordered list + is an original data structure based on an ordered list. Suppose, you want construct split-list set based on cds::urcu::general_buffered<> GC + and LazyList as ordered list implementation. 
So, you beginning your program with following include: + \code + #include + #include + #include + + namespace cc = cds::container; + + // The data belonged to split-ordered list + sturuct foo { + int nKey; // key field + std::string strValue ; // value field + }; + \endcode + The inclusion order is important: + - first, include one of \ref cds_urcu_gc "RCU implementation" (cds/urcu/general_buffered.h in our case) + - second, include file for ordered-list implementation (for this example, cds/container/lazy_list_rcu.h), + - then, the header for RCU-based split-list set cds/container/split_list_set_rcu.h. + + Now, you should declare traits for split-list set. The main parts of traits are a hash functor for the set and a comparing functor for ordered list. + Note that we define several function in foo_hash and foo_less functors for different argument types since we want call our \p %SplitListSet + object by the key of type int and by the value of type foo. + + The second attention: instead of using LazyList in SplitListSet traits we use a tag cds::contaner::lazy_list_tag for the lazy list. + The split-list requires significant support from underlying ordered list class and it is not good idea to dive you + into deep implementation details of split-list and ordered list interrelations. The tag paradigm simplifies split-list interface. + + \code + // foo hash functor + struct foo_hash { + size_t operator()( int key ) const { return std::hash( key ) ; } + size_t operator()( foo const& item ) const { return std::hash( item.nKey ) ; } + }; + + // foo comparator + struct foo_less { + bool operator()(int i, foo const& f ) const { return i < f.nKey ; } + bool operator()(foo const& f, int i ) const { return f.nKey < i ; } + bool operator()(foo const& f1, foo const& f2) const { return f1.nKey < f2.nKey; } + }; + + // SplitListSet traits + struct foo_set_traits: public cc::split_list::type_traits + { + typedef cc::lazy_list_tag ordered_list ; // what type of ordered list we want to use + typedef foo_hash hash ; // hash functor for our data stored in split-list set + + // Type traits for our LazyList class + struct ordered_list_traits: public cc::lazy_list::type_traits + { + typedef foo_less less ; // use our foo_less as comparator to order list nodes + }; + }; + \endcode + + Now you are ready to declare our set class based on \p %SplitListSet: + \code + typedef cc::SplitListSet< cds::urcu::gc >, foo, foo_set_traits > foo_set; + \endcode + + You may use the modern option-based declaration instead of classic type-traits-based one: + \code + typedef cc:SplitListSet< + cds::urcu::gc > // RCU type used + ,foo // type of data stored + ,cc::split_list::make_traits< // metafunction to build split-list traits + cc::split_list::ordered_list // tag for underlying ordered list implementation + ,cc::opt::hash< foo_hash > // hash functor + ,cc::split_list::ordered_list_traits< // ordered list traits desired + cc::lazy_list::make_traits< // metafunction to build lazy list traits + cc::opt::less< foo_less > // less-based compare functor + >::type + > + >::type + > foo_set; + \endcode + In case of option-based declaration using split_list::make_traits metafunction + the struct \p foo_set_traits is not required. + + Now, the set of type \p foo_set is ready to use in your program. + + Note that in this example we show only mandatory type_traits parts, optional ones is the default and they are inherited + from cds::container::split_list::type_traits. 
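+
+        Once \p foo_set is declared, it can be used like this (the values are for illustration only;
+        the member functions are described below):
+        \code
+        foo_set theSet;
+
+        foo item;
+        item.nKey = 10;
+        item.strValue = "ten";
+        theSet.insert( item );
+
+        // get() requires an RCU critical section around it
+        {
+            foo_set::rcu_lock lock;
+            foo * p = theSet.get( 10 );
+            if ( p ) {
+                // p is valid only while the RCU lock is held
+            }
+        }
+
+        // erase() must be called outside of the RCU critical section
+        theSet.erase( 10 );
+        \endcode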
+ The cds library contains many other options for deep tuning of behavior of the split-list and + ordered-list containers. + */ + template < + class RCU, + class T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = split_list::type_traits +#else + class Traits +#endif + > + class SplitListSet< cds::urcu::gc< RCU >, T, Traits >: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::SplitListSet< cds::urcu::gc< RCU >, typename Traits::ordered_list, Traits > +#else + protected details::make_split_list_set< cds::urcu::gc< RCU >, T, typename Traits::ordered_list, split_list::details::wrap_set_traits >::type +#endif + { + protected: + //@cond + typedef details::make_split_list_set< cds::urcu::gc< RCU >, T, typename Traits::ordered_list, split_list::details::wrap_set_traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef Traits options ; ///< \p Traits template argument + typedef typename maker::gc gc ; ///< Garbage collector + typedef typename maker::value_type value_type ; ///< type of value stored in the list + typedef typename maker::ordered_list ordered_list ; ///< Underlying ordered list class + typedef typename base_class::key_comparator key_comparator; ///< key compare functor + + /// Hash functor for \ref value_type and all its derivatives that you use + typedef typename base_class::hash hash; + typedef typename base_class::item_counter item_counter ; ///< Item counter type + + typedef typename base_class::rcu_lock rcu_lock ; ///< RCU scoped lock + /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that + static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; + + protected: + //@cond + typedef typename maker::cxx_node_allocator cxx_node_allocator; + typedef typename maker::node_type node_type; + //@endcond + + public: + /// pointer to extracted node + typedef cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::ordered_list_traits::disposer > exempt_ptr; + + protected: + //@cond + + template + static node_type * alloc_node(Q const& v ) + { + return cxx_node_allocator().New( v ); + } + + template + bool find_( Q& val, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( val, [&f]( node_type& item, Q& val ) { cds::unref(f)(item.m_Value, val) ; } ); +# else + find_functor_wrapper fw(f); + return base_class::find( val, cds::ref(fw) ); +# endif + } + + template + bool find_with_( Q& val, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( val, typename maker::template predicate_wrapper::type(), + [&f]( node_type& item, Q& val ) { cds::unref(f)(item.m_Value, val) ; } ); +# else + find_functor_wrapper fw(f); + return base_class::find_with( val, typename maker::template predicate_wrapper::type(), cds::ref(fw) ); +# endif + } + + +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node( Args&&... 
args ) + { + return cxx_node_allocator().MoveNew( std::forward(args)...); + } +# endif + + static void free_node( node_type * pNode ) + { + cxx_node_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + bool insert_node( node_type * pNode ) + { + assert( pNode != null_ptr() ); + scoped_node_ptr p(pNode); + + if ( base_class::insert( *pNode ) ) { + p.release(); + return true; + } + + return false; + } + + //@endcond + + protected: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + template + class insert_functor_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + insert_functor_wrapper( Func f ): base_class(f) {} + + void operator()(node_type& node) + { + base_class::get()( node.m_Value ); + } + }; + + template + class ensure_functor_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + Q const& m_val; + public: + ensure_functor_wrapper( Func f, Q const& v ): base_class(f), m_val(v) {} + + void operator()( bool bNew, node_type& item, node_type const& /*val*/ ) + { + base_class::get()( bNew, item.m_Value, m_val ); + } + }; + + template + class find_functor_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + find_functor_wrapper( Func f ): base_class(f) {} + + template + void operator()( node_type& item, Q& val ) + { + base_class::get()( item.m_Value, val ); + } + }; + + struct empty_find_functor + { + template + void operator()( node_type&, Q& ) + {} + }; + + template + class erase_functor_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + erase_functor_wrapper( Func f ): base_class( f ) {} + + void operator()(node_type& node) + { + base_class::get()( node.m_Value ); + } + }; +# endif // ifndef CDS_CXX11_LAMBDA_SUPPORT + //@endcond + + protected: + /// Forward iterator + /** + \p IsConst - constness boolean flag + + The forward iterator for a split-list has the following features: + - it has no post-increment operator + - it depends on underlying ordered list iterator + - it is safe to iterate only inside RCU critical section + - deleting an item pointed by the iterator can cause to deadlock + + Therefore, the use of iterators in concurrent environment is not good idea. + Use it for debug purpose only. 
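+
+            A sketch of debug-style iteration (the whole traversal must be performed inside an RCU
+            critical section; \p foo_set is the hypothetical typedef from the usage example above):
+            \code
+            foo_set theSet;
+            // ...
+            {
+                foo_set::rcu_lock lock;
+                for ( foo_set::iterator it = theSet.begin(), itEnd = theSet.end(); it != itEnd; ++it ) {
+                    // items cannot be reclaimed while the RCU lock is held;
+                    // it->nKey and it->strValue may be read here, but do not erase items while iterating
+                }
+            }
+            \endcode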
+ */ + template + class iterator_type: protected base_class::template iterator_type + { + //@cond + typedef typename base_class::template iterator_type iterator_base_class; + friend class SplitListSet; + //@endcond + public: + /// Value pointer type (const for const iterator) + typedef typename cds::details::make_const_type::pointer value_ptr; + /// Value reference type (const for const iterator) + typedef typename cds::details::make_const_type::reference value_ref; + + public: + /// Default ctor + iterator_type() + {} + + /// Copy ctor + iterator_type( iterator_type const& src ) + : iterator_base_class( src ) + {} + + protected: + //@cond + explicit iterator_type( iterator_base_class const& src ) + : iterator_base_class( src ) + {} + //@endcond + + public: + /// Dereference operator + value_ptr operator ->() const + { + return &(iterator_base_class::operator->()->m_Value); + } + + /// Dereference operator + value_ref operator *() const + { + return iterator_base_class::operator*().m_Value; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base_class::operator++(); + return *this; + } + + /// Assignment operator + iterator_type& operator = (iterator_type const& src) + { + iterator_base_class::operator=(src); + return *this; + } + + /// Equality operator + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base_class::operator==(i); + } + + /// Equality operator + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base_class::operator!=(i); + } + }; + + public: + /// Initializes split-ordered list of default capacity + /** + The default capacity is defined in bucket table constructor. + See intrusive::split_list::expandable_bucket_table, intrusive::split_list::static_bucket_table + which selects by intrusive::split_list::dynamic_bucket_table option. + */ + SplitListSet() + : base_class() + {} + + /// Initializes split-ordered list + SplitListSet( + size_t nItemCount ///< estimate average of item count + , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 8, default is 1. + ) + : base_class( nItemCount, nLoadFactor ) + {} + + public: + typedef iterator_type iterator ; ///< Forward iterator + typedef iterator_type const_iterator ; ///< Forward const iterator + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( base_class::begin() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. + For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( base_class::end() ); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return const_iterator( base_class::begin() ); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator end() const + { + return const_iterator( base_class::end() ); + } + + public: + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain as minimum the complete key for the node. + The object of \p value_type should be constructible from a value of type \p Q. 
+ In trivial case, \p Q is equal to \p value_type. + + The function applies RCU lock internally. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( Q const& val ) + { + return insert_node( alloc_node( val ) ); + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success. It may be passed by reference + using boost::ref + + The function applies RCU lock internally. + */ + template + bool insert( Q const& val, Func f ) + { + scoped_node_ptr pNode( alloc_node( val )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( base_class::insert( *pNode, [&f](node_type& node) { cds::unref(f)( node.m_Value ) ; } )) +# else + insert_functor_wrapper fw(f); + if ( base_class::insert( *pNode, cds::ref(fw) ) ) +# endif + { + pNode.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \p value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + The function applies RCU lock internally. + + @note This function is available only for compiler that supports + variadic template and move semantics. + */ + template + bool emplace( Args&&... args ) + { + return insert_node( alloc_node( std::forward(args)...)); + } +# endif + + /// Ensures that the \p item exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p val key not found in the set, then the new item created from \p val + is inserted into the set. Otherwise, the functor \p func is called with the item found. + The functor \p Func should be a function with signature: + \code + void func( bool bNew, value_type& item, const Q& val ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p ensure function + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref. + + The function applies RCU lock internally. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the set. 
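+
+            For example, an insert-or-update sketch with a plain functor (usable even without
+            C++11 lambda support; \p foo and \p foo_set are taken from the usage example above):
+            \code
+            struct update_value {
+                void operator()( bool bNew, foo& item, foo const& val ) const
+                {
+                    if ( !bNew )
+                        item.strValue = val.strValue;  // refresh non-key field of the existing item
+                }
+            };
+
+            foo_set theSet;
+            foo v;
+            v.nKey = 10;
+            v.strValue = "ten";
+
+            std::pair<bool, bool> res = theSet.ensure( v, update_value() );
+            // res.first  - true if the operation succeeded
+            // res.second - true if a new item has been inserted
+            \endcode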
+ */ + template + std::pair ensure( Q const& val, Func func ) + { + scoped_node_ptr pNode( alloc_node( val )); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + std::pair bRet = base_class::ensure( *pNode, + [&func, &val]( bool bNew, node_type& item, node_type const& /*val*/ ) { + cds::unref(func)( bNew, item.m_Value, val ); + } ); +# else + ensure_functor_wrapper fw( func, val ); + std::pair bRet = base_class::ensure( *pNode, cds::ref(fw) ); +# endif + + if ( bRet.first && bRet.second ) + pNode.release(); + return bRet; + } + + /// Deletes \p key from the set + /** \anchor cds_nonintrusive_SplitListSet_rcu_erase_val + + Since the key of SplitListSet's item type \p value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The set item comparator should be able to compare the values of type \p value_type + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_rcu_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred ) + { + return base_class::erase_with( key, typename maker::template predicate_wrapper::type() ); + } + + /// Deletes \p key from the set + /** \anchor cds_nonintrusive_SplitListSet_rcu_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type const& val); + }; + \endcode + The functor may be passed by reference using boost:ref + + Since the key of SplitListSet's \p value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the values of the type \p value_type + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase( key, [&f](node_type& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor_wrapper fw( f ); + return base_class::erase( key, cds::ref(fw) ); +# endif + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_rcu_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
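+
+            For instance (\p foo_set and \p foo_less are taken from the usage example above;
+            a compiler with C++11 lambda support is assumed):
+            \code
+            foo_set theSet;
+            // ...
+
+            // erase by key: the ordered-list comparator can compare foo and int
+            theSet.erase( 10 );
+
+            // erase with an explicit predicate and a functor that observes the item being removed
+            theSet.erase_with( 20, foo_less(),
+                []( foo const& val ) {
+                    // val is the item that is about to be retired
+                });
+            \endcode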
+ */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::erase_with( key, typename maker::template predicate_wrapper::type(), + [&f](node_type& node) { cds::unref(f)( node.m_Value ); } ); +# else + erase_functor_wrapper fw( f ); + return base_class::erase_with( key, typename maker::template predicate_wrapper::type(), cds::ref(fw) ); +# endif + } + + /// Extracts an item from the set + /** \anchor cds_nonintrusive_SplitListSet_rcu_extract + The function searches an item with key equal to \p val in the set, + unlinks it from the set, places item pointer into \p dest argument, and returns \p true. + If the item with the key equal to \p val is not found the function return \p false. + + @note The function does NOT call RCU read-side lock or synchronization, + and does NOT dispose the item found. It just excludes the item from the set + and returns a pointer to item found. + You should lock RCU before calling of the function, and you should synchronize RCU + outside the RCU lock to free extracted item + + \code + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::container::SplitListSet< rcu, Foo > splitlist_set; + + splitlist_set theSet; + // ... + + splitlist_set::exempt_ptr p; + { + // first, we should lock RCU + splitlist_set::rcu_lock lock; + + // Now, you can apply extract function + // Note that you must not delete the item found inside the RCU lock + if ( theSet.extract( p, 10 )) { + // do something with p + ... + } + } + + // We may safely release p here + // release() passes the pointer to RCU reclamation cycle + p.release(); + \endcode + */ + template + bool extract( exempt_ptr& dest, Q const& val ) + { + node_type * pNode = base_class::extract_( val, key_comparator() ); + if ( pNode ) { + dest = pNode; + return true; + } + return false; + } + + /// Extracts an item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_rcu_extract "extract(exempt_ptr&, Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool extract_with( exempt_ptr& dest, Q const& val, Less pred ) + { + node_type * pNode = base_class::extract_with_( val, typename maker::template predicate_wrapper::type()); + if ( pNode ) { + dest = pNode; + return true; + } + return false; + } + + /// Finds the key \p val + /** \anchor cds_nonintrusive_SplitListSet_rcu_find_func + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. 
+ + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function makes RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return find_( val, f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_rcu_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Less pred, Func f ) + { + return find_with_( val, pred, f ); + } + + /// Find the key \p val + /** \anchor cds_nonintrusive_SplitListSet_rcu_find_cfunc + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function makes RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return find_( val, f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_rcu_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred, Func f ) + { + return find_with_( val, pred, f ); + } + + /// Finds the key \p val + /** \anchor cds_nonintrusive_SplitListSet_rcu_find_val + + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function makes RCU lock internally. + */ + template + bool find( Q const& val ) + { + return base_class::find( val ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_rcu_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
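+
+            For instance (\p foo_set and \p foo_less come from the usage example above; the
+            function takes the RCU lock internally, so no explicit locking is needed):
+            \code
+            foo_set theSet;
+            // ...
+            if ( theSet.find_with( 10, foo_less() )) {
+                // an item with key 10 is present in the set
+            }
+            \endcode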
+ */ + template + bool find_with( Q const& val, Less pred ) + { + return base_class::find_with( val, typename maker::template predicate_wrapper::type() ); + } + + /// Finds the key \p val and return the item found + /** \anchor cds_nonintrusive_SplitListSet_rcu_get + The function searches the item with key equal to \p val and returns the pointer to item found. + If \p val is not found it returns \p NULL. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::container::SplitListSet< rcu, Foo > splitlist_set; + splitlist_set theSet; + // ... + { + // Lock RCU + splitlist_set::rcu_lock lock; + + foo * pVal = theSet.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + // Unlock RCU by rcu_lock destructor + // pVal can be retired by disposer at any time after RCU has been unlocked + } + \endcode + */ + template + value_type * get( Q const& val ) + { + node_type * pNode = base_class::get( val ); + return pNode ? &pNode->m_Value : null_ptr(); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + value_type * get_with( Q const& val, Less pred ) + { + node_type * pNode = base_class::get_with( val, typename maker::template predicate_wrapper::type()); + return pNode ? &pNode->m_Value : null_ptr(); + } + + /// Clears the set (non-atomic) + /** + The function unlink all items from the set. + The function is not atomic and not lock-free and should be used for debugging only. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then assume that the set is empty. + Thus, the correct item counting feature is an important part of split-list set implementation. 
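+
+            A debugging-only teardown sketch (\p clear() is not atomic and must be called outside
+            of any RCU critical section; \p foo_set is the hypothetical typedef from above):
+            \code
+            foo_set theSet;
+            // ... fill the set ...
+
+            theSet.clear();
+            assert( theSet.empty());
+            \endcode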
+ */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + size_t size() const + { + return base_class::size(); + } + }; + + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_SPLIT_LIST_SET_RCU_H diff --git a/cds/container/striped_map.h b/cds/container/striped_map.h new file mode 100644 index 00000000..d02e481a --- /dev/null +++ b/cds/container/striped_map.h @@ -0,0 +1,1000 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_MAP_H +#define __CDS_CONTAINER_STRIPED_MAP_H + +#include +#include +#include +#include + +#ifndef CDS_CXX11_LAMBDA_SUPPORT +# include +#endif + +namespace cds { namespace container { + + //@cond + namespace details { + template + class make_striped_map + { + typedef StripedSet< Container, CDS_OPTIONS9> billet; + typedef typename billet::options billet_options; + typedef typename billet_options::hash billet_hash; + + typedef typename Container::value_type pair_type; + typedef typename pair_type::first_type key_type; + + struct options: public billet_options { + struct hash: public billet_hash { + size_t operator()( pair_type const& v ) const + { + return billet_hash::operator()( v.first ); + } + + template + size_t operator()( Q const& v ) const + { + return billet_hash::operator()( v ); + } + }; + }; + + public: + typedef StripedSet< Container, cds::opt::type_traits< options > > type ; ///< metafunction result + }; + } + //@endcond + + /// Striped hash map + /** @ingroup cds_nonintrusive_map + + Source + - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming" + + Lock striping is very simple technique. + The map consists of the bucket table and the array of locks. + Initially, the capacity of lock array and bucket table is the same. + When the map is resized, bucket table capacity will be doubled but lock array will not. + The lock \p i protects each bucket \p j, where j = i mod L , + where \p L - the size of lock array. + + Template arguments: + - \p Container - the container class that is used as bucket entry. The \p Container class should support + an uniform interface described below. + - \p Options - options + + The \p %StripedMap class does not exactly specify the type of container that should be used as a \p Container bucket. + Instead, the class supports different container type for the bucket, for exampe, \p std::list, \p std::map and others. + + Remember that \p %StripedMap class algorithm ensures sequential blocking access to its bucket through the mutex type you specify + among \p Options template arguments. + + The \p Options are: + - opt::mutex_policy - concurrent access policy. + Available policies: intrusive::striped_set::striping, intrusive::striped_set::refinable. + Default is %striped_set::striping. + - opt::hash - hash functor. Default option value see opt::v::hash_selector which selects default hash functor for + your compiler. + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::item_counter - item counter type. Default is \p atomicity::item_counter since some operation on the counter is performed + without locks. Note that item counting is an essential part of the map algorithm, so dummy type like atomicity::empty_item_counter + is not suitable. + - opt::allocator - the allocator type using for memory allocation of bucket table and lock array. Default is CDS_DEFAULT_ALLOCATOR. 
+ - opt::resizing_policy - the resizing policy that is a functor that decides when to resize the hash map. + Default option value depends on bucket container type: + for sequential containers like \p std::list, \p std::vector the resizing policy is hash_set::load_factor_resizing<4>; + for other type of containers like \p std::map, \p std::unordered_map the resizing policy is hash_set::no_resizing. + See \ref intrusive::striped_set namespace for list of all possible types of the option. + Note that the choose of resizing policy depends of \p Container type: + for sequential containers like \p std::list, \p std::vector and so on, right choosing of the policy can + significantly improve performance. + For other, non-sequential types of \p Container (like a \p std::map) + the resizing policy is not so important. + - opt::copy_policy - the copy policy which is used to copy items from the old map to the new one when resizing. + The policy can be optionally used in adapted bucket container for performance reasons of resizing. + The detail of copy algorithm depends on type of bucket container and explains below. + + \p opt::compare or \p opt::less options are used only in some \p Container class for searching an item. + \p %opt::compare option has the highest priority: if \p %opt::compare is specified, \p %opt::less is not used. + + You can pass other option that would be passed to adapt metafunction, see below. + + Internal details + + The \p %StripedMap class cannot utilize the \p Container container specified directly, but only its adapted variant which + supports an unified interface. Internally, the adaptation is made via hash_set::adapt metafunction that wraps bucket container + and provides the unified bucket interface suitable for \p %StripedMap. Such adaptation is completely transparent for you - + you don't need to call \p adapt metafunction directly, \p %StripedMap class's internal machinery itself invokes appropriate + \p adapt metafunction to adjust your \p Container container class to \p %StripedMap bucket's internal interface. + All you need is to include a right header before striped_hash_map.h. + + By default, intrusive::striped_set::adapt metafunction does not make any wrapping to \p AnyContainer, + so, the result %intrusive::striped_set::adapt::type is the same as \p AnyContainer. + However, there are a lot of specializations of \p adapt for well-known containers, see table below. + Any of this specialization wraps corresponding container making it suitable for the map's bucket. + Remember, you should include the proper header file for \p adapt before striped_map.h. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Container | .h-file for \p adapt | Example | Notes
\p std::list\code + #include + #include + typedef cds::container::StripedMap< + std::list< std::pair< const Key, V > >, + cds::opt::less< std::less > + > striped_map; + \endcode + + The type of values stored in the \p std::list must be std::pair< const Key, V > , where \p Key - key type, and \p V - value type + The list is ordered by key \p Key. + Template argument pack \p Options must contain cds::opt::less or cds::opt::compare for type \p Key stored in the list. +
\p std::map\code + #include + #include + typedef cds::container::StripedMap< + std::map< Key, T, std::less > + > striped_map; + \endcode + +
\p std::unordered_map\code + #include + #include + typedef cds::container::StripedMap< + std::unordered_map< + Key, T, + std::hash, + std::equal_to + > + > striped_map; + \endcode + + You should provide two different hash function \p h1 and \p h2 - one for std::unordered_map and other for \p %StripedMap. + For the best result, \p h1 and \p h2 must be orthogonal i.e. h1(X) != h2(X) for any value \p X of type \p Key. +
\p stdext::hash_map (only for MS VC++ 2008)\code + #include + #include + typedef cds::container::StripedMap< + stdext::hash_map< Key, T, + stdext::hash_compare< + Key, + std::less + > + > + > striped_map; + \endcode + + You should provide two different hash function \p h1 and \p h2 - one for stdext::hash_map and other for \p %StripedMap. + For the best result, \p h1 and \p h2 must be orthogonal i.e. h1(X) != h2(X) for any value \p X of type \p Key. +
\p boost::container::slist\code + #include + #include + typedef cds::container::StripedMap< + boost::container::slist< std::pair< const Key, T > > + > striped_map; + \endcode + + The type of values stored in the \p boost::container::slist must be std::pair< const Key, T > , + where \p Key - key type, and \p T - value type. The list is ordered. + \p Options must contain cds::opt::less or cds::opt::compare. +
\p boost::container::list\code + #include + #include + typedef cds::container::StripedMap< + boost::container::list< std::pair< const Key, T > > + > striped_map; + \endcode + + The type of values stored in the \p boost::container::list must be std::pair< const Key, T > , + where \p Key - key type, and \p T - value type. The list is ordered. + \p Options must contain cds::opt::less or cds::opt::compare. +
\p boost::container::map\code + #include + #include + typedef cds::container::StripedMap< + boost::container::map< Key, T, std::pair< const Key, T> > + > striped_map; + \endcode + +
\p boost::container::flat_map\code + #include + #include + typedef cds::container::StripedMap< + boost::container::flat_map< Key, T, + std::less< std::pair< const Key, T> > + > + > striped_map; + \endcode + +
\p boost::unordered_map\code + #include + #include + typedef cds::container::StripedMap< + boost::unordered_map< Key, T, boost::hash, std::equal_to > + > refinable_map; + \endcode + +
+ + + You can use another container type as map's bucket. + Suppose, you have a container class \p MyBestContainer and you want to integrate it with \p %StripedMap as bucket type. + There are two possibility: + - either your \p MyBestContainer class has native support of bucket's interface; + in this case, you can use default hash_set::adapt metafunction; + - or your \p MyBestContainer class does not support bucket's interface, which means, that you should develop a specialization + cds::container::hash_set::adapt metafunction providing necessary interface. + + The hash_set::adapt< Container, Options... > metafunction has two template argument: + - \p Container is the class that should be used as the bucket, for example, std::list< std::pair< Key, T > >. + - \p Options pack is the options from \p %StripedMap declaration. The \p adapt metafunction can use + any option from \p Options for its internal use. For example, a \p compare option can be passed to \p adapt + metafunction via \p Options argument of \p %StripedMap declaration. + + See hash_set::adapt metafunction for the description of interface that the bucket container must provide + to be \p %StripedMap compatible. + + Copy policy + There are three predefined copy policy: + - \p cds::container::hash_set::copy_item - copy item from old bucket to new one when resizing using copy ctor. It is default policy for + any compiler that do not support move semantics + - \p cds::container::hash_set::move_item - move item from old bucket to new one when resizing using move semantics. It is default policy for + any compiler that support move semantics. If compiler does not support move semantics, the move policy is the same as \p copy_item + - \p cds::container::hash_set::swap_item - copy item from old bucket to new one when resizing using \p std::swap. Not all containers support + this copy policy, see details in table below. + + You can define your own copy policy specifically for your case. + Note, right copy policy can significantly improve the performance of resizing. + + + + + + + + + + + + + + + + + +
Container | Policies
+ - \p std::list + - \p boost::list + \code + struct copy_item { + void operator()( + std::list< std::pair >& list, + std::list >::iterator itInsert, + std::list >::iterator itWhat ) + { + list.insert( itInsert, *itWhat ); + } + } \endcode + + \code + // The type T stored in the list must be swappable + struct swap_item { + void operator()( + std::list< std::pair >& list, + std::list >::iterator itInsert, + std::list >::iterator itWhat ) + { + std::pair newVal( itWhat->first, T() ); + std::swap( list.insert( itInsert, newVal )->second, itWhat->second ); + } + } \endcode + + \code + struct move_item { + void operator()( + std::list< std::pair >& list, + std::list >::iterator itInsert, + std::list >::iterator itWhat ) + { + list.insert( itInsert, std::move( *itWhat ) ); + } + } \endcode +
+ - \p std::map + - \p std::unordered_map + - \p stdext::hash_map (only for MS VC++ 2008) + - \p boost::container::map + - \p boost::container::flat_map + - \p boost::unordered_map + \code + struct copy_item { + void operator()( std::map< Key, T>& map, std::map::iterator itWhat ) + { + map.insert( *itWhat ); + } + } \endcode + + \code + struct swap_item { + void operator()( std::map< Key, T>& map, std::map::iterator itWhat ) + { + std::swap( + map.insert( + std::map::value_type( itWhat->first, T() ) ).first->second + , itWhat->second + )); + } + } \endcode + \p T type must be swappable. + + \code + struct move_item { + void operator()( std::map< Key, T>& map, std::map::iterator itWhat ) + { + map.insert( std::move( *itWhat )); + } + } \endcode +
\p boost::container::slist\code + struct copy_item { + void operator()( + bc::slist< std::pair >& list, + bc::slist >::iterator itInsert, + bc::slist >::iterator itWhat ) + { + list.insert_after( itInsert, *itWhat ); + } + } \endcode + + \code + // The type T stored in the list must be swappable + struct swap_item { + void operator()( + bc::slist< std::pair >& list, + bc::slist >::iterator itInsert, + bc::slist >::iterator itWhat ) + { + std::pair newVal( itWhat->first, T() ); + std::swap( list.insert( itInsert, newVal )->second, itWhat->second ); + } + } \endcode + + \code + struct move_item { + void operator()( + bc::slist< std::pair >& list, + bc::slist >::iterator itInsert, + bc::slist >::iterator itWhat ) + { + list.insert_after( itInsert, std::move( *itWhat ) ); + } + } \endcode +
+
+ Advanced functions
+
+ libcds provides some advanced functions like \p erase_with and \p find_with
+ that cannot be supported by all underlying containers.
+ The table below shows whether the underlying container supports those functions
+ (the sign "+" means "the container supports the function"):
+
Container | \p find_with | \p erase_with
\p std::list | + | +
\p std::map | - | -
\p std::unordered_map | - | -
\p stdext::hash_map (only for MS VC++ 2008) | - | -
\p boost::container::slist | + | +
\p boost::container::list | + | +
\p boost::container::map | - | -
\p boost::container::flat_map | - | -
\p boost::unordered_map | - | -
+ + **/ + template + class StripedMap +#ifdef CDS_DOXYGEN_INVOKED + : protected StripedSet +#else + : protected details::make_striped_map< Container, CDS_OPTIONS9>::type +#endif + { + //@cond + typedef typename details::make_striped_map< Container, CDS_OPTIONS9>::type base_class; + //@endcond + + public: + //@cond + typedef typename base_class::default_options default_options; + typedef typename base_class::options options; + //@endcond + + typedef Container underlying_container_type ; ///< original intrusive container type for the bucket + typedef typename base_class::bucket_type bucket_type ; ///< container type adapted for hash set + typedef typename bucket_type::value_type value_type ; ///< pair type ( std::pair ) + typedef typename value_type::first_type key_type ; ///< key type + typedef typename value_type::second_type mapped_type ; ///< mapped type + + typedef typename base_class::hash hash ; ///< Hash functor + typedef typename base_class::item_counter item_counter ; ///< Item counter + typedef typename base_class::resizing_policy resizing_policy ; ///< Resizing policy + typedef typename base_class::allocator_type allocator_type ; ///< allocator type specified in options. + typedef typename base_class::mutex_policy mutex_policy ; ///< Mutex policy + + protected: + //@cond + typedef typename base_class::scoped_cell_lock scoped_cell_lock; + typedef typename base_class::scoped_full_lock scoped_full_lock; + typedef typename base_class::scoped_resize_lock scoped_resize_lock; + //@endcond + + private: + //@cond + struct key_accessor { + key_type const& operator()( value_type const& p ) const + { + return p.first; + } + }; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + + template + class find_functor_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + find_functor_wrapper() {} + find_functor_wrapper( Func f ): base_class(f) {} + + template + void operator()( value_type& pair, Q const& /*val*/ ) + { + base_class::get()( pair ); + } + }; + + template + class insert_value_functor + { + Q const& m_val; + public: + insert_value_functor( Q const & v) + : m_val(v) + {} + + void operator()( value_type& item ) + { + item.second = m_val; + } + }; + + struct dummy_insert_functor + { + void operator()( value_type& item ) + {} + }; +# endif // #ifndef CDS_CXX11_LAMBDA_SUPPORT + + //@endcond + + public: + /// Default ctor. The initial capacity is 16. + StripedMap() + : base_class() + {} + + /// Ctor with initial capacity specified + StripedMap( + size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. + ) : base_class( nCapacity ) + {} + + /// Ctor with resizing policy (copy semantics) + /** + This constructor initializes m_ResizingPolicy member with copy of \p resizingPolicy parameter + */ + StripedMap( + size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. + ,resizing_policy const& resizingPolicy ///< Resizing policy + ) : base_class( nCapacity, resizingPolicy ) + {} + +#ifdef CDS_RVALUE_SUPPORT + /// Ctor with resizing policy (move semantics) + /** + This constructor initializes m_ResizingPolicy member moving \p resizingPolicy parameter + Move semantics is used. Available only for the compilers that supports C++11 rvalue reference. + */ + StripedMap( + size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. 
+ ,resizing_policy&& resizingPolicy ///< Resizing policy + ) : base_class( nCapacity, std::forward(resizingPolicy) ) + {} +#endif + + /// Destructor destroys internal data + ~StripedMap() + {} + + public: + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from a value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( K const& key ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return insert_key( key, [](value_type&){} ); +# else + return insert_key( key, dummy_insert_functor() ); +# endif + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( K const& key, V const& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return insert_key( key, [&val](value_type& item) { item.second = val ; } ); +# else + insert_value_functor f(val); + return insert_key( key, cds::ref(f) ); +# endif + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + The user-defined functor can be passed by reference using boost::ref + and it is called only if inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + */ + template + bool insert_key( const K& key, Func func ) + { + return base_class::insert( key, func ); + } + +# ifdef CDS_EMPLACE_SUPPORT + /// For key \p key inserts data of type \ref mapped_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( K&& key, Args&&... 
args ) + { + bool bOk; + bool bResize; + size_t nHash = base_class::hashing( std::forward(key)); + bucket_type * pBucket; + { + scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); + pBucket = base_class::bucket( nHash ); + + bOk = pBucket->emplace( std::forward(key), std::forward(args)...); + bResize = bOk && base_class::m_ResizingPolicy( ++base_class::m_ItemCounter, *this, *pBucket ); + } + + if ( bResize ) + base_class::resize(); + + return bOk; + } +# endif + + /// Ensures that the \p key exists in the map + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p key not found in the map, then the new item created from \p key + is inserted into the map (note that in this case the \ref key_type should be + constructible from type \p K). + Otherwise, the functor \p func is called with item found. + The functor \p Func may be a function with signature: + \code + void func( bool bNew, value_type& item ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + + The functor may change any fields of the \p item.second that is \ref mapped_type. + + You may pass \p func argument by reference using boost::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair ensure( K const& key, Func func ) + { + std::pair result; + bool bResize; + size_t nHash = base_class::hashing( key ); + bucket_type * pBucket; + { + scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); + pBucket = base_class::bucket( nHash ); + + result = pBucket->ensure( key, func ); + bResize = result.first && result.second && base_class::m_ResizingPolicy( ++base_class::m_ItemCounter, *this, *pBucket ); + } + + if ( bResize ) + base_class::resize(); + return result; + } + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_StripedMap_erase + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return base_class::erase( key ); + } + +#ifdef CDS_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS_SUPPORT + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_StripedMap_erase "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the map. + + @note This function is enabled if the compiler supports C++11 + default template arguments for function template and the underlying container + supports \p %erase_with feature. + */ + template < typename K, typename Less + ,typename Bucket = bucket_type, typename = typename std::enable_if< Bucket::has_erase_with >::type > + bool erase_with( K const& key, Less pred ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_with( key, pred, [](value_type const&) {} ); +# else + return erase_with( key, pred, typename base_class::empty_erase_functor() ); +# endif + } +#endif + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_StripedMap_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. 
+ + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& item) { ... } + }; + \endcode + The functor may be passed by reference using boost:ref + + Return \p true if key is found and deleted, \p false otherwise + + See also: \ref erase + */ + template + bool erase( K const& key, Func f ) + { + return base_class::erase( key, f ); + } + +#ifdef CDS_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS_SUPPORT + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_StripedMap_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the map. + + @note This function is enabled if the compiler supports C++11 + default template arguments for function template and the underlying container + supports \p %erase_with feature. + */ + template ::type > + bool erase_with( K const& key, Less pred, Func f ) + { + return base_class::erase_with( key, cds::details::predicate_wrapper< value_type, Less, key_accessor >(), f ); + } +#endif + + /// Find the key \p key + /** \anchor cds_nonintrusive_StripedMap_find_func + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + You can pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change \p item.second. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find( key, [&f]( value_type& pair, K const& ) mutable { cds::unref(f)(pair); } ); +# else + find_functor_wrapper fw(f); + return base_class::find( key, cds::ref(fw) ); +# endif + } + +#ifdef CDS_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS_SUPPORT + /// Find the key \p val using \p pred predicate + /** + The function is an analog of \ref cds_nonintrusive_StripedMap_find_func "find(K const&, Func)" + but \p pred is used for key comparing + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + + @note This function is enabled if the compiler supports C++11 + default template arguments for function template and the underlying container + supports \p %find_with feature. + */ + template ::type > + bool find_with( K const& key, Less pred, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return base_class::find_with( key, cds::details::predicate_wrapper< value_type, Less, key_accessor >(), + [&f]( value_type& pair, K const& ) mutable { cds::unref(f)(pair); } ); +# else + find_functor_wrapper fw(f); + return base_class::find_with( key, cds::details::predicate_wrapper< value_type, Less, key_accessor >(), cds::ref(fw) ); +# endif + } +#endif + + /// Find the key \p key + /** \anchor cds_nonintrusive_StripedMap_find_val + + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. 
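+
+ A minimal usage sketch, for illustration only: the \p int / \p std::string key and
+ value types and the \p map_type / \p m names are hypothetical, and the \p std::list
+ bucket adapter header is included before striped_map.h as described above.
+ \code
+ #include <cds/container/striped_map/std_list.h>
+ #include <cds/container/striped_map.h>
+
+ typedef cds::container::StripedMap<
+     std::list< std::pair< const int, std::string > >,
+     cds::opt::less< std::less<int> >
+ > map_type;
+
+ map_type m;
+ m.insert( 10, std::string("ten") );  // insert key 10 with mapped value "ten"
+ bool b1 = m.find( 10 );              // returns true: the key is present
+ bool b2 = m.find( 20 );              // returns false: no such key
+ \endcode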
+ */ + template + bool find( K const& key ) + { + return base_class::find( key ); + } + +#ifdef CDS_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS_SUPPORT + /// Find the key \p val using \p pred predicate + /** + The function is an analog of \ref cds_nonintrusive_StripedMap_find_val "find(K const&)" + but \p pred is used for key comparing + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + + @note This function is enabled if the compiler supports C++11 + default template arguments for function template and the underlying container + supports \p %find_with feature. + */ + template ::type > + bool find_with( K const& key, Less pred ) + { + return base_class::find_with( key, cds::details::predicate_wrapper< value_type, Less, key_accessor >() ); + } +#endif + + /// Clears the map + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + + /// Returns the size of hash table + /** + The hash table size is non-constant and can be increased via resizing. + */ + size_t bucket_count() const + { + return base_class::bucket_count(); + } + + /// Returns lock array size + /** + The lock array size is constant. + */ + size_t lock_count() const + { + return base_class::lock_count(); + } + + /// Returns resizing policy object + resizing_policy& get_resizing_policy() + { + return base_class::get_resizing_policy(); + } + + /// Returns resizing policy (const version) + resizing_policy const& get_resizing_policy() const + { + return base_class::get_resizing_policy(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_STRIPED_MAP_H diff --git a/cds/container/striped_map/boost_flat_map.h b/cds/container/striped_map/boost_flat_map.h new file mode 100644 index 00000000..d16d89f2 --- /dev/null +++ b/cds/container/striped_map/boost_flat_map.h @@ -0,0 +1,58 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_MAP_BOOST_FLAT_MAP_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_MAP_BOOST_FLAT_MAP_ADAPTER_H + +#include +#if BOOST_VERSION < 104800 +# error "For boost::container::flat_map you must use boost 1.48 or above" +#endif + +#include +#include + +//#if CDS_COMPILER == CDS_COMPILER_MSVC && CDS_COMPILER_VERSION >= 1700 +//# error "boost::container::flat_map is not compatible with MS VC++ 11" +//#endif + + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for map + template + struct copy_item_policy< boost::container::flat_map< Key, T, Traits, Alloc > > + : public details::boost_map_copy_policies >::copy_item_policy + {}; + + // Swap item policy + template + struct swap_item_policy< boost::container::flat_map< Key, T, Traits, Alloc > > + : public details::boost_map_copy_policies >::swap_item_policy + {}; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for map + template + struct move_item_policy< boost::container::flat_map< Key, T, Traits, Alloc > > + : public details::boost_map_copy_policies >::move_item_policy + {}; +#endif + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + template + class adapt< boost::container::flat_map< Key, T, Traits, Alloc>, CDS_OPTIONS > + { + public: + typedef boost::container::flat_map< 
Key, T, Traits, Alloc> container_type ; ///< underlying container type + typedef cds::container::striped_set::details::boost_map_adapter< container_type, CDS_OPTIONS > type; + }; +}}} // namespace cds::intrusive::striped_set + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_MAP_BOOST_FLAT_MAP_ADAPTER_H diff --git a/cds/container/striped_map/boost_list.h b/cds/container/striped_map/boost_list.h new file mode 100644 index 00000000..df4c4d0f --- /dev/null +++ b/cds/container/striped_map/boost_list.h @@ -0,0 +1,273 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_MAP_BOOST_LIST_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_MAP_BOOST_LIST_ADAPTER_H + +#include +#if BOOST_VERSION < 104800 +# error "For boost::container::list you must use boost 1.48 or above" +#endif + +#include +#include +#include +#include // std::lower_bound +#include // std::pair + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for map + template + struct copy_item_policy< boost::container::list< std::pair< K const, T >, Alloc > > + { + typedef std::pair< K const, T> pair_type; + typedef boost::container::list< pair_type, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + list.insert( itInsert, *itWhat ); + } + }; + + // Swap policy for map + template + struct swap_item_policy< boost::container::list< std::pair< K const, T >, Alloc > > + { + typedef std::pair< K const, T> pair_type; + typedef boost::container::list< pair_type, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + pair_type newVal( itWhat->first, typename pair_type::second_type() ); + itInsert = list.insert( itInsert, newVal ); + std::swap( itInsert->second, itWhat->second ); + } + }; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for map + template + struct move_item_policy< boost::container::list< std::pair< K const, T >, Alloc > > + { + typedef std::pair< K const, T> pair_type; + typedef boost::container::list< pair_type, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + list.insert( itInsert, std::move( *itWhat ) ); + } + }; +#endif + } // namespace striped_set +}} // namespace cds:container + +namespace cds { namespace intrusive { namespace striped_set { + + /// boost::container::list adapter for hash map bucket + template + class adapt< boost::container::list< std::pair, Alloc>, CDS_OPTIONS > + { + public: + typedef boost::container::list< std::pair, Alloc> container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename value_type::first_type key_type; + typedef typename value_type::second_type mapped_type; + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = true; + static bool const has_erase_with = true; + + private: + //@cond + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, CDS_OPTIONS >::type key_comparator; + + typedef typename cds::opt::select< + typename 
cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , CDS_OPTIONS + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy +#endif + >::type copy_item; + + struct find_predicate + { + bool operator()( value_type const& i1, value_type const& i2) const + { + return key_comparator()( i1.first, i2.first ) < 0; + } + + template + bool operator()( Q const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2.first ) < 0; + } + + template + bool operator()( value_type const& i1, Q const& i2) const + { + return key_comparator()( i1.first, i2 ) < 0; + } + }; + //@endcond + + private: + //@cond + container_type m_List; + //@endcond + + public: + adapted_container() + {} + + template + bool insert( const Q& key, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate() ); + if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) { + //value_type newItem( key ); + it = m_List.insert( it, value_type( key, mapped_type()) ); + cds::unref( f )( *it ); + + return true; + } + + // key already exists + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace( K&& key, Args&&... args ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate() ); + if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) { + m_List.emplace( it, std::forward(key), std::move( mapped_type( std::forward(args)... )) ); + return true; + } + return false; + } +# endif + + template + std::pair ensure( const Q& key, Func func ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate() ); + if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) { + // insert new + value_type newItem( key, mapped_type() ); + it = m_List.insert( it, newItem ); + cds::unref( func )( true, *it ); + + return std::make_pair( true, true ); + } + else { + // already exists + cds::unref( func )( false, *it ); + return std::make_pair( true, false ); + } + } + + template + bool erase( Q const& key, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate() ); + if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) + return false; + + // key exists + cds::unref( f )( *it ); + m_List.erase( it ); + + return true; + } + + template + bool erase( Q const& key, Less pred, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, pred ); + if ( it == m_List.end() || pred( key, it->first ) || pred(it->first, key) ) + return false; + + // key exists + cds::unref( f )( *it ); + m_List.erase( it ); + + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate() ); + if ( it == m_List.end() || key_comparator()( val, it->first ) != 0 ) + return false; + + // key exists + cds::unref( f )( *it, val ); + return true; + } + + template + bool find( Q& val, Less pred, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, pred ); + if ( it == m_List.end() || pred( val, it->first ) || pred( it->first, val ) ) + return false; + + // key exists + cds::unref( f )( *it, val ); + 
return true; + } + + /// Clears the container + void clear() + { + m_List.clear(); + } + + iterator begin() { return m_List.begin(); } + const_iterator begin() const { return m_List.begin(); } + iterator end() { return m_List.end(); } + const_iterator end() const { return m_List.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), *itWhat, find_predicate() ); + assert( it == m_List.end() || key_comparator()( itWhat->first, it->first ) != 0 ); + + copy_item()( m_List, it, itWhat ); + } + + size_t size() const + { + return m_List.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; + +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_MAP_BOOST_LIST_ADAPTER_H diff --git a/cds/container/striped_map/boost_map.h b/cds/container/striped_map/boost_map.h new file mode 100644 index 00000000..371cf9c7 --- /dev/null +++ b/cds/container/striped_map/boost_map.h @@ -0,0 +1,54 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_MAP_BOOST_MAP_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_MAP_BOOST_MAP_ADAPTER_H + +#include +#if BOOST_VERSION < 104800 +# error "For boost::container::map you must use boost 1.48 or above" +#endif + +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for map + template + struct copy_item_policy< boost::container::map< Key, T, Traits, Alloc > > + : public details::boost_map_copy_policies >::copy_item_policy + {}; + + // Swap item policy + template + struct swap_item_policy< boost::container::map< Key, T, Traits, Alloc > > + : public details::boost_map_copy_policies >::swap_item_policy + {}; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for map + template + struct move_item_policy< boost::container::map< Key, T, Traits, Alloc > > + : public details::boost_map_copy_policies >::move_item_policy + {}; +#endif + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// std::set adapter for hash set bucket + template + class adapt< boost::container::map< Key, T, Traits, Alloc>, CDS_OPTIONS > + { + public: + typedef boost::container::map< Key, T, Traits, Alloc> container_type ; ///< underlying container type + typedef cds::container::striped_set::details::boost_map_adapter< container_type, CDS_OPTIONS > type; + }; +}}} // namespace cds::intrusive::striped_set + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_MAP_BOOST_MAP_ADAPTER_H diff --git a/cds/container/striped_map/boost_slist.h b/cds/container/striped_map/boost_slist.h new file mode 100644 index 00000000..52445979 --- /dev/null +++ b/cds/container/striped_map/boost_slist.h @@ -0,0 +1,285 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_MAP_BOOST_SLIST_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_MAP_BOOST_SLIST_ADAPTER_H + +#include +#if BOOST_VERSION < 104800 +# error "For boost::container::slist you must use boost 1.48 or above" +#endif + +#include +#include +#include +#include // std::pair + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for map + template + struct copy_item_policy< boost::container::slist< std::pair< K const, T >, Alloc > > + { + typedef std::pair< K const, T> pair_type; + typedef boost::container::slist< pair_type, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( 
list_type& list, iterator itInsert, iterator itWhat ) + { + itInsert = list.insert_after( itInsert, *itWhat ); + } + }; + + // Swap policy for map + template + struct swap_item_policy< boost::container::slist< std::pair< K const, T >, Alloc > > + { + typedef std::pair< K const, T> pair_type; + typedef boost::container::slist< pair_type, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + pair_type newVal( itWhat->first, typename pair_type::mapped_type() ); + itInsert = list.insert_after( itInsert, newVal ); + std::swap( itInsert->second, itWhat->second ); + } + }; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for map + template + struct move_item_policy< boost::container::slist< std::pair< K const, T >, Alloc > > + { + typedef std::pair< K const, T> pair_type; + typedef boost::container::slist< pair_type, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + list.insert_after( itInsert, std::move( *itWhat ) ); + } + }; +#endif + } // namespace striped_set +}} // namespace cds:container + +namespace cds { namespace intrusive { namespace striped_set { + + /// boost::container::slist adapter for hash map bucket + template + class adapt< boost::container::slist< std::pair, Alloc>, CDS_OPTIONS > + { + public: + typedef boost::container::slist< std::pair, Alloc> container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename value_type::first_type key_type; + typedef typename value_type::second_type mapped_type; + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = true; + static bool const has_erase_with = true; + + private: + //@cond + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, CDS_OPTIONS >::type key_comparator; + + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , CDS_OPTIONS + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy +#endif + >::type copy_item; + + template + std::pair< iterator, bool > find_prev_item( Q const& key ) + { + iterator itPrev = m_List.before_begin(); + iterator itEnd = m_List.end(); + for ( iterator it = m_List.begin(); it != itEnd; ++it ) { + int nCmp = key_comparator()( key, it->first ); + if ( nCmp < 0 ) + itPrev = it; + else if ( nCmp > 0 ) + break; + else + return std::make_pair( itPrev, true ); + } + return std::make_pair( itPrev, false ); + } + + template + std::pair< iterator, bool > find_prev_item( Q const& key, Less pred ) + { + iterator itPrev = m_List.before_begin(); + iterator itEnd = m_List.end(); + for ( iterator it = m_List.begin(); it != itEnd; ++it ) { + if ( pred( key, it->first )) + itPrev = it; + else if ( pred(it->first, 
key)) + break; + else + return std::make_pair( itPrev, true ); + } + return std::make_pair( itPrev, false ); + } + //@endcond + + private: + //@cond + container_type m_List; + //@endcond + + public: + adapted_container() + {} + + template + bool insert( const Q& key, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( key ); + if ( !pos.second ) { + value_type newItem( key, mapped_type() ); + pos.first = m_List.insert_after( pos.first, newItem ); + cds::unref( f )( *pos.first ); + return true; + } + + // key already exists + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace( K&& key, Args&&... args ) + { + std::pair< iterator, bool > pos = find_prev_item( key ); + if ( !pos.second ) { + m_List.emplace_after( pos.first, std::forward(key), std::move( mapped_type( std::forward(args)... ))); + return true; + } + return false; + } +# endif + + template + std::pair ensure( const Q& key, Func func ) + { + std::pair< iterator, bool > pos = find_prev_item( key ); + if ( !pos.second ) { + // insert new + value_type newItem( key, mapped_type() ); + pos.first = m_List.insert_after( pos.first, newItem ); + cds::unref( func )( true, *pos.first ); + return std::make_pair( true, true ); + } + else { + // already exists + cds::unref( func )( false, *(++pos.first) ); + return std::make_pair( true, false ); + } + } + + template + bool erase( Q const& key, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( key ); + if ( !pos.second ) + return false; + + // key exists + iterator it = pos.first; + cds::unref( f )( *(++it) ); + m_List.erase_after( pos.first ); + + return true; + } + + template + bool erase( Q const& key, Less pred, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( key, pred ); + if ( !pos.second ) + return false; + + // key exists + iterator it = pos.first; + cds::unref( f )( *(++it) ); + m_List.erase_after( pos.first ); + + return true; + } + + template + bool find( Q& val, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( val ); + if ( !pos.second ) + return false; + + // key exists + cds::unref( f )( *(++pos.first), val ); + return true; + } + + template + bool find( Q& val, Less pred, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( val, pred ); + if ( !pos.second ) + return false; + + // key exists + cds::unref( f )( *(++pos.first), val ); + return true; + } + + void clear() + { + m_List.clear(); + } + + iterator begin() { return m_List.begin(); } + const_iterator begin() const { return m_List.begin(); } + iterator end() { return m_List.end(); } + const_iterator end() const { return m_List.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + std::pair< iterator, bool > pos = find_prev_item( itWhat->first ); + assert( !pos.second ); + + copy_item()( m_List, pos.first, itWhat ); + } + + size_t size() const + { + return m_List.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrusive::striped_set + + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_MAP_BOOST_SLIST_ADAPTER_H diff --git a/cds/container/striped_map/boost_unordered_map.h b/cds/container/striped_map/boost_unordered_map.h new file mode 100644 index 00000000..05076e3b --- /dev/null +++ b/cds/container/striped_map/boost_unordered_map.h @@ -0,0 +1,50 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_MAP_BOOST_UNORDERED_MAP_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_MAP_BOOST_UNORDERED_MAP_ADAPTER_H + +#include 
+#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for map + template + struct copy_item_policy< boost::unordered_map< Key, T, Traits, Alloc > > + : public details::boost_map_copy_policies >::copy_item_policy + {}; + + // Swap policy for map + template + struct swap_item_policy< boost::unordered_map< Key, T, Traits, Alloc > > + : public details::boost_map_copy_policies >::swap_item_policy + {}; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for map + template + struct move_item_policy< boost::unordered_map< Key, T, Traits, Alloc > > + : public details::boost_map_copy_policies >::move_item_policy + {}; +#endif + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// boost::unordered_map adapter for hash map bucket + template + class adapt< boost::unordered_map< Key, T, Hash, Pred, Alloc>, CDS_OPTIONS > + { + public: + typedef boost::unordered_map< Key, T, Hash, Pred, Alloc> container_type ; ///< underlying container type + typedef cds::container::striped_set::details::boost_map_adapter< container_type, CDS_OPTIONS > type; + }; +}}} // namespace cds::intrusive::striped_set + + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_MAP_BOOST_UNORDERED_MAP_ADAPTER_H diff --git a/cds/container/striped_map/std_hash_map.h b/cds/container/striped_map/std_hash_map.h new file mode 100644 index 00000000..7b1fa6bc --- /dev/null +++ b/cds/container/striped_map/std_hash_map.h @@ -0,0 +1,13 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_MAP_STD_HASH_MAP_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_MAP_STD_HASH_MAP_ADAPTER_H + +#include +#if (CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL) && _MSC_VER < 1600 // MS VC 2008 +# include +#else +# include +#endif + +#endif // #ifndef __CDS_CONTAINER_STRIPED_MAP_STD_HASH_MAP_ADAPTER_H diff --git a/cds/container/striped_map/std_hash_map_std.h b/cds/container/striped_map/std_hash_map_std.h new file mode 100644 index 00000000..404d1d9b --- /dev/null +++ b/cds/container/striped_map/std_hash_map_std.h @@ -0,0 +1,195 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_MAP_STD_HASH_MAP_STD_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_MAP_STD_HASH_MAP_STD_ADAPTER_H + +#ifndef __CDS_CONTAINER_STRIPED_MAP_STD_HASH_MAP_ADAPTER_H +# error must be included instead of header +#endif + +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for map + template + struct copy_item_policy< std::unordered_map< Key, T, Hash, Pred, Alloc > > + { + typedef std::unordered_map< Key, T, Hash, Pred, Alloc > map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + map.insert( *itWhat ); + } + }; + + // Swap policy for map + template + struct swap_item_policy< std::unordered_map< Key, T, Hash, Pred, Alloc > > + { + typedef std::unordered_map< Key, T, Hash, Pred, Alloc > map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + pair_type pair( itWhat->first, typename pair_type::second_type() ); + std::pair res = map.insert( pair ); + assert( res.second ); + std::swap( res.first->second, itWhat->second ); + } + }; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for map + template + struct move_item_policy< std::unordered_map< Key, T, 
Hash, Pred, Alloc > > + { + typedef std::unordered_map< Key, T, Hash, Pred, Alloc > map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + map.insert( std::move( *itWhat ) ); + } + }; +#endif + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// std::unordered_map adapter for hash map bucket + template + class adapt< std::unordered_map< Key, T, Hash, Pred, Alloc>, CDS_OPTIONS > + { + public: + typedef std::unordered_map< Key, T, Hash, Pred, Alloc> container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::key_type key_type; + typedef typename container_type::mapped_type mapped_type; + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = false; + static bool const has_erase_with = false; + + private: + //@cond + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , CDS_OPTIONS + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy +#endif + >::type copy_item; + //@endcond + + private: + //@cond + container_type m_Map; + //@endcond + + public: + template + bool insert( const Q& key, Func f ) + { + std::pair res = m_Map.insert( value_type( key, mapped_type() )); + if ( res.second ) + ::cds::unref(f)( const_cast(*res.first) ); + return res.second; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace( Q&& key, Args&&... args ) + { +# if CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION < 40800 || CDS_COMPILER == CDS_COMPILER_CLANG && !defined(__LIBCPP_VERSION) + // GCC < 4.8: std::map has no "emplace" member function. 
Emulate it + std::pair res = m_Map.insert( value_type( std::forward(key), mapped_type( std::forward(args)...))); +# else + std::pair res = m_Map.emplace( std::forward(key), std::move( mapped_type(std::forward(args)...)) ); +# endif + return res.second; + } +# endif + + template + std::pair ensure( const Q& key, Func func ) + { + std::pair res = m_Map.insert( value_type( key, mapped_type() ) ); + cds::unref(func)( res.second, const_cast(*res.first)); + return std::make_pair( true, res.second ); + } + + template + bool erase( const Q& key, Func f ) + { + iterator it = m_Map.find( key_type(key) ); + if ( it == m_Map.end() ) + return false; + ::cds::unref(f)( const_cast(*it) ); + m_Map.erase( it ); + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = m_Map.find( key_type(val) ); + if ( it == m_Map.end() ) + return false; + ::cds::unref(f)( const_cast(*it), val ); + return true; + } + + void clear() + { + m_Map.clear(); + } + + iterator begin() { return m_Map.begin(); } + const_iterator begin() const { return m_Map.begin(); } + iterator end() { return m_Map.end(); } + const_iterator end() const { return m_Map.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + assert( m_Map.find( itWhat->first ) == m_Map.end() ); + copy_item()( m_Map, itWhat ); + } + + size_t size() const + { + return m_Map.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrusive::striped_set + + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_MAP_STD_HASH_MAP_STD_ADAPTER_H diff --git a/cds/container/striped_map/std_hash_map_vc.h b/cds/container/striped_map/std_hash_map_vc.h new file mode 100644 index 00000000..34ede223 --- /dev/null +++ b/cds/container/striped_map/std_hash_map_vc.h @@ -0,0 +1,182 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_MAP_STD_HASH_MAP_MSVC_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_MAP_STD_HASH_MAP_MSVC_ADAPTER_H + +#ifndef __CDS_CONTAINER_STRIPED_MAP_STD_HASH_MAP_ADAPTER_H +# error must be included instead of header +#endif + +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for map + template + struct copy_item_policy< stdext::hash_map< Key, T, Traits, Alloc > > + { + typedef stdext::hash_map< Key, T, Traits, Alloc > map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + std::pair< typename map_type::iterator, bool> res = map.insert( *itWhat ); + assert( res.second ) ; // succesful insert + } + }; + + // Swap policy for map + template + struct swap_item_policy< stdext::hash_map< Key, T, Traits, Alloc > > + { + typedef stdext::hash_map< Key, T, Traits, Alloc > map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + pair_type newVal( itWhat->first, typename pair_type::second_type() ); + std::pair< typename map_type::iterator, bool> res = map.insert( newVal ); + assert( res.second ) ; // succesful insert + std::swap( res.first->second, itWhat->second ); + } + }; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for map + template + struct move_item_policy< stdext::hash_map< Key, T, Traits, Alloc > > + { + typedef stdext::hash_map< Key, T, Traits, Alloc > map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator 
iterator; + + void operator()( map_type& map, iterator itWhat ) + { + map.insert( std::move( *itWhat ) ); + } + }; +#endif + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// stdext::hash_map adapter for hash map bucket + template + class adapt< stdext::hash_map< Key, T, Traits, Alloc>, CDS_OPTIONS > + { + public: + typedef stdext::hash_map< Key, T, Traits, Alloc> container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::key_type key_type; + typedef typename container_type::mapped_type mapped_type; + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = false; + static bool const has_erase_with = false; + + private: + //@cond + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , CDS_OPTIONS + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy +#endif + >::type copy_item; + //@endcond + + private: + //@cond + container_type m_Map; + //@endcond + + public: + template + bool insert( const Q& key, Func f ) + { + std::pair res = m_Map.insert( value_type( key, mapped_type() ) ); + if ( res.second ) + ::cds::unref(f)( *res.first ); + return res.second; + } + + template + std::pair ensure( const Q& val, Func func ) + { + std::pair res = m_Map.insert( value_type( val, mapped_type() )); + ::cds::unref(func)( res.second, *res.first ); + return std::make_pair( true, res.second ); + } + + template + bool erase( const Q& key, Func f ) + { + iterator it = m_Map.find( key_type(key) ); + if ( it == m_Map.end() ) + return false; + ::cds::unref(f)( *it ); + m_Map.erase( it ); + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = m_Map.find( key_type(val) ); + if ( it == m_Map.end() ) + return false; + ::cds::unref(f)( *it, val ); + return true; + } + + void clear() + { + m_Map.clear(); + } + + iterator begin() { return m_Map.begin(); } + const_iterator begin() const { return m_Map.begin(); } + iterator end() { return m_Map.end(); } + const_iterator end() const { return m_Map.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + assert( m_Map.find( itWhat->first ) == m_Map.end() ); + copy_item()( m_Map, itWhat ); + } + + size_t size() const + { + return m_Map.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrusive::striped_set + + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_MAP_STD_HASH_MAP_MSVC_ADAPTER_H diff --git a/cds/container/striped_map/std_list.h b/cds/container/striped_map/std_list.h new file mode 100644 index 00000000..8df61dcb --- /dev/null +++ b/cds/container/striped_map/std_list.h @@ -0,0 +1,301 @@ +//$$CDS-header$$ + +#ifndef 
__CDS_CONTAINER_STRIPED_MAP_STD_LIST_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_MAP_STD_LIST_ADAPTER_H + +#include +#include +#include +#include // std::lower_bound +#include // std::pair + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for map + template + struct copy_item_policy< std::list< std::pair< K const, T >, Alloc > > + { + typedef std::pair< K const, T> pair_type; + typedef std::list< pair_type, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + list.insert( itInsert, *itWhat ); + } + }; + + // Swap policy for map + template + struct swap_item_policy< std::list< std::pair< K const, T >, Alloc > > + { + typedef std::pair< K const, T> pair_type; + typedef std::list< pair_type, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + pair_type newVal( itWhat->first, typename pair_type::second_type() ); + itInsert = list.insert( itInsert, newVal ); + std::swap( itInsert->second, itWhat->second ); + } + }; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for map + template + struct move_item_policy< std::list< std::pair< K const, T >, Alloc > > + { + typedef std::pair< K const, T> pair_type; + typedef std::list< pair_type, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + list.insert( itInsert, std::move( *itWhat ) ); + } + }; +#endif + } // namespace striped_set +}} // namespace cds:container + +namespace cds { namespace intrusive { namespace striped_set { + + /// std::list adapter for hash map bucket + template + class adapt< std::list< std::pair, Alloc>, CDS_OPTIONS > + { + public: + typedef std::list< std::pair, Alloc> container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename value_type::first_type key_type; + typedef typename value_type::second_type mapped_type; + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = true; + static bool const has_erase_with = true; + + private: + //@cond + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, CDS_OPTIONS >::type key_comparator; + + + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , CDS_OPTIONS + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy +#endif + >::type copy_item; + + struct find_predicate + { + bool operator()( value_type const& i1, value_type const& i2) const + { + return key_comparator()( i1.first, i2.first ) < 0; + } + + template + bool operator()( Q const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2.first ) < 0; + } + + template + 
bool operator()( value_type const& i1, Q const& i2) const + { + return key_comparator()( i1.first, i2 ) < 0; + } + }; + //@endcond + + private: + //@cond + container_type m_List; +# ifdef __GLIBCXX__ + // GCC C++ lib bug: + // In GCC (at least up to 4.7.x), the complexity of std::list::size() is O(N) + // (see http://gcc.gnu.org/bugzilla/show_bug.cgi?id=49561) + size_t m_nSize ; // list size +# endif + //@endcond + + public: + adapted_container() +# ifdef __GLIBCXX__ + : m_nSize(0) +# endif + {} + + template + bool insert( const Q& key, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate() ); + if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) { + //value_type newItem( key ); + it = m_List.insert( it, value_type( key, mapped_type()) ); + cds::unref( f )( *it ); + +# ifdef __GLIBCXX__ + ++m_nSize; +# endif + return true; + } + + // key already exists + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace( K&& key, Args&&... args ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate() ); + if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) { + //value_type newItem( key ); + it = m_List.emplace( it, value_type( std::forward(key), std::move( mapped_type( std::forward(args)...) )) ); + +# ifdef __GLIBCXX__ + ++m_nSize; +# endif + return true; + } + return false; + } +# endif + + template + std::pair ensure( const Q& key, Func func ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate() ); + if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) { + // insert new + value_type newItem( key, mapped_type() ); + it = m_List.insert( it, newItem ); + cds::unref( func )( true, *it ); +# ifdef __GLIBCXX__ + ++m_nSize; +# endif + return std::make_pair( true, true ); + } + else { + // already exists + cds::unref( func )( false, *it ); + return std::make_pair( true, false ); + } + } + + template + bool erase( Q const& key, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate() ); + if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) + return false; + + // key exists + cds::unref( f )( *it ); + m_List.erase( it ); +# ifdef __GLIBCXX__ + --m_nSize; +# endif + + return true; + } + + template + bool erase( Q const& key, Less pred, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, pred ); + if ( it == m_List.end() || pred( key, it->first ) || pred( it->first, key ) ) + return false; + + // key exists + cds::unref( f )( *it ); + m_List.erase( it ); +# ifdef __GLIBCXX__ + --m_nSize; +# endif + + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate() ); + if ( it == m_List.end() || key_comparator()( val, it->first ) != 0 ) + return false; + + // key exists + cds::unref( f )( *it, val ); + return true; + } + + template + bool find( Q& val, Less pred, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, pred ); + if ( it == m_List.end() || pred( val, it->first ) || pred( it->first, val ) ) + return false; + + // key exists + cds::unref( f )( *it, val ); + return true; + } + + void clear() + { + m_List.clear(); + } + + iterator begin() { return m_List.begin(); } + const_iterator begin() const { return m_List.begin(); } + iterator end() { return m_List.end(); } + const_iterator end() const { return m_List.end(); } + + 
void move_item( adapted_container& /*from*/, iterator itWhat ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), *itWhat, find_predicate() ); + assert( it == m_List.end() || key_comparator()( itWhat->first, it->first ) != 0 ); + + copy_item()( m_List, it, itWhat ); +# ifdef __GLIBCXX__ + ++m_nSize; +# endif + } + + size_t size() const + { +# ifdef __GLIBCXX__ + return m_nSize; +# else + return m_List.size(); +# endif + + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrusive::striped_set + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_MAP_STD_LIST_ADAPTER_H diff --git a/cds/container/striped_map/std_map.h b/cds/container/striped_map/std_map.h new file mode 100644 index 00000000..728ebf08 --- /dev/null +++ b/cds/container/striped_map/std_map.h @@ -0,0 +1,190 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_MAP_STD_MAP_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_MAP_STD_MAP_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for map + template + struct copy_item_policy< std::map< Key, T, Traits, Alloc > > + { + typedef std::map< Key, T, Traits, Alloc > map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + map.insert( *itWhat ); + } + }; + + // Swap item policy + template + struct swap_item_policy< std::map< Key, T, Traits, Alloc > > + { + typedef std::map< Key, T, Traits, Alloc > map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + std::pair< typename map_type::iterator, bool > ret = map.insert( pair_type( itWhat->first, typename pair_type::second_type() )); + assert( ret.second ) ; // successful insertion + std::swap( ret.first->second, itWhat->second ); + } + }; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for map + template + struct move_item_policy< std::map< Key, T, Traits, Alloc > > + { + typedef std::map< Key, T, Traits, Alloc > map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + map.insert( std::move( *itWhat ) ); + } + }; +#endif + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// std::set adapter for hash set bucket + template + class adapt< std::map< Key, T, Traits, Alloc>, CDS_OPTIONS > + { + public: + typedef std::map< Key, T, Traits, Alloc> container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::key_type key_type; + typedef typename container_type::mapped_type mapped_type; + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = false; + static bool const has_erase_with = false; + + private: + //@cond + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< 
cds::container::striped_set::move_item > + , CDS_OPTIONS + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy +#endif + >::type copy_item; + //@endcond + + private: + //@cond + container_type m_Map; + //@endcond + + public: + + template + bool insert( const Q& key, Func f ) + { + std::pair res = m_Map.insert( value_type( key, mapped_type() ) ); + if ( res.second ) + ::cds::unref(f)( *res.first ); + return res.second; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace( Q&& key, Args&&... args ) + { +# if CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION < 40800 || CDS_COMPILER == CDS_COMPILER_CLANG && !defined(__LIBCPP_VERSION) + // GCC < 4.8: std::map has no "emplace" member function. Emulate it + std::pair res = m_Map.insert( value_type( std::forward(key), mapped_type( std::forward(args)...))); +# else + std::pair res = m_Map.emplace( std::forward(key), std::move(mapped_type( std::forward(args)...))); +# endif + return res.second; + } +# endif + + template + std::pair ensure( const Q& key, Func func ) + { + std::pair res = m_Map.insert( value_type( key, mapped_type() )); + cds::unref(func)( res.second, *res.first ); + return std::make_pair( true, res.second ); + } + + template + bool erase( const Q& key, Func f ) + { + iterator it = m_Map.find( key_type(key) ); + if ( it == m_Map.end() ) + return false; + cds::unref(f)( *it ); + m_Map.erase( it ); + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = m_Map.find( key_type(val) ); + if ( it == m_Map.end() ) + return false; + cds::unref(f)( *it, val ); + return true; + } + + /// Clears the container + void clear() + { + m_Map.clear(); + } + + iterator begin() { return m_Map.begin(); } + const_iterator begin() const { return m_Map.begin(); } + iterator end() { return m_Map.end(); } + const_iterator end() const { return m_Map.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + assert( m_Map.find( itWhat->first ) == m_Map.end() ); + copy_item()( m_Map, itWhat ); + } + + size_t size() const + { + return m_Map.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + }; +}}} // namespace cds::intrusive::striped_set + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_MAP_STD_MAP_ADAPTER_H diff --git a/cds/container/striped_set.h b/cds/container/striped_set.h new file mode 100644 index 00000000..83c95758 --- /dev/null +++ b/cds/container/striped_set.h @@ -0,0 +1,1009 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_SET_H +#define __CDS_CONTAINER_STRIPED_SET_H + +#include +#include + +namespace cds { namespace container { + + /// Striped hash set + /** @ingroup cds_nonintrusive_set + + Source + - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming" + + Lock striping is very simple technique. + The set consists of the bucket table and the array of locks. + Initially, the capacity of lock array and bucket table is the same. + When set is resized, bucket table capacity will be doubled but lock array will not. + The lock \p i protects each bucket \p j, where j = i mod L , + where \p L - the size of lock array. + + Template arguments: + - \p Container - the container class that is used as bucket table entry. 
The \p Container class should support + a uniform interface described below. + - \p Options - options + + The \p %StripedSet class does not exactly dictate the type of container that should be used as a \p Container bucket. + Instead, the class supports different container types for the bucket, for example, \p std::list, \p std::set and others. + + Remember that the \p %StripedSet algorithm ensures sequential blocking access to its buckets through the mutex type you specify + among the \p Options template arguments. + + The \p Options are: + - opt::mutex_policy - concurrent access policy. + Available policies: intrusive::striped_set::striping, intrusive::striped_set::refinable. + Default is %striped_set::striping. + - opt::hash - hash functor. For the default option value see opt::v::hash_selector, which selects the default hash functor for + your compiler. + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, opt::less is used. + - opt::less - specifies the binary predicate used for key comparison. Default is \p std::less. + - opt::item_counter - item counter type. Default is \p atomicity::item_counter since some operations on the counter are performed + without locks. Note that item counting is an essential part of the set algorithm, so a dummy type like atomicity::empty_item_counter + is not suitable. + - opt::allocator - the allocator type used for memory allocation of the bucket table and the lock array. Default is CDS_DEFAULT_ALLOCATOR. + - opt::resizing_policy - the resizing policy, a functor that decides when to resize the hash set. + The default option value depends on the bucket container type: + for sequential containers like \p std::list, \p std::vector the resizing policy is striped_set::load_factor_resizing<4>; + for other types of containers like \p std::set, \p std::unordered_set the resizing policy is striped_set::no_resizing. + See the \ref striped_set namespace for the list of all possible types of the option. + Note that the choice of resizing policy depends on the \p Container type: + for sequential containers like \p std::list, \p std::vector and so on, choosing the right policy can + significantly improve performance. + For other, non-sequential types of \p Container (like \p std::set) + the resizing policy is not so important. + - opt::copy_policy - the copy policy which is used to copy items from the old set to the new one when resizing. + The policy can optionally be used by the adapted bucket container to speed up resizing. + The details of the copy algorithm depend on the type of bucket container and are explained below. + + The opt::compare or opt::less options are used by some \p Container classes for searching an item. + The opt::compare option has the highest priority: if opt::compare is specified, opt::less is not used. + + You can pass other options that will be forwarded to the \p adapt metafunction, see below. + + Internal details + + The \p %StripedSet class cannot use the specified \p Container directly, but only its adapted variant which + supports a unified interface. Internally, the adaptation is made via the striped_set::adapt metafunction that wraps the bucket container + and provides the unified bucket interface suitable for \p %StripedSet. Such adaptation is completely transparent to you - + you don't need to call the \p adapt metafunction directly; the \p %StripedSet class's internal machinery itself invokes the appropriate + \p adapt metafunction to adjust your \p Container class to \p %StripedSet bucket's internal interface.
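As an illustration of the declaration-level interface described above, here is a hedged sketch that combines several of the options listed above in one declaration; \p my_hash is a hypothetical hash functor and the load factor value 8 is an arbitrary choice:
\code
// Hedged sketch: combining several of the options described above.
// my_hash is a hypothetical hash functor for int keys; 8 is an arbitrary load factor.
typedef cds::container::StripedSet<
    std::vector< int >,
    cds::opt::hash< my_hash >,
    cds::opt::less< std::less< int > >,
    cds::opt::resizing_policy< cds::container::striped_set::load_factor_resizing< 8 > >
> my_striped_set;
\endcode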
+ All you need is to include the right header before striped_hash_set.h. + + By default, the striped_set::adapt metafunction does not apply any wrapping to \p AnyContainer, + so the result %striped_set::adapt::type is the same as \p AnyContainer. + However, there are a lot of specializations of striped_set::adapt for well-known containers, see the table below. + Each of these specializations wraps the corresponding container, making it suitable for the set's bucket. + Remember, you should include the proper header file for \p adapt before including striped_hash_set.h. +
Container | .h-file for \p adapt | Example | Notes
\p std::list\code + #include + #include + typedef cds::container::StripedSet< + std::list, + cds::opt::less< std::less > + > striped_set; + \endcode + + The list is ordered. + Template argument pack \p Options must contain cds::opt::less or cds::opt::compare for type \p T stored in the list +
\p std::vector\code + #include + #include + typedef cds::container::StripedSet< + std::vector, + cds::opt::less< std::less > + > striped_set; + \endcode + + The vector is ordered. + Template argument pack \p Options must contain cds::opt::less or cds::opt::compare for type \p T stored in the vector +
\p std::set\code + #include + #include + typedef cds::container::StripedSet< + std::set< T, std::less > + > striped_set; + \endcode + +
\p std::unordered_set\code + #include + #include + typedef cds::container::StripedSet< + std::unordered_set< + T, + hash, + equal + > + > striped_set; + \endcode + + You should provide two different hash functions \p h1 and \p h2 - one for std::unordered_set and the other for \p %StripedSet. + For the best result, \p h1 and \p h2 must be orthogonal i.e. h1(X) != h2(X) for any value \p X. +
\p stdext::hash_set (only for MS VC++ 2008)\code + #include + #include + typedef cds::container::StripedSet< + stdext::hash_set< T, + stdext::hash_compare< + T, + std::less + > + > + > striped_set; + \endcode + + You should provide two different hash functions \p h1 and \p h2 - one for stdext::hash_set and the other for \p %StripedSet. + For the best result, \p h1 and \p h2 must be orthogonal i.e. h1(X) != h2(X) for any value \p X. +
\p boost::container::slist\code + #include + #include + typedef cds::container::StripedSet< + boost::container::slist + > striped_set; + \endcode + + The list is ordered. + \p Options must contain cds::opt::less or cds::opt::compare. +
\p boost::container::list\code + #include + #include + typedef cds::container::StripedSet< + boost::container::list + > striped_set; + \endcode + + The list is ordered. + \p Options must contain cds::opt::less or cds::opt::compare. +
\p boost::container::vector\code + #include + #include + typedef cds::container::StripedSet< + boost::container::vector, + cds::opt::less< std::less > + > striped_set; + \endcode + + The vector is ordered. + Template argument pack \p Options must contain cds::opt::less or cds::opt::compare for type \p T stored in the vector +
\p boost::container::stable_vector\code + #include + #include + typedef cds::container::StripedSet< + boost::container::stable_vector, + cds::opt::less< std::less > + > striped_set; + \endcode + + The vector is ordered. + Template argument pack \p Options must contain cds::opt::less or cds::opt::compare for type \p T stored in the vector +
\p boost::container::set\code + #include + #include + typedef cds::container::StripedSet< + boost::container::set< T, std::less > + > striped_set; + \endcode + +
\p boost::container::flat_set\code + #include + #include + typedef cds::container::StripedSet< + boost::container::flat_set< T, std::less > + > striped_set; + \endcode + +
\p boost::unordered_set\code + #include + #include + typedef cds::container::StripedSet< + boost::unordered_set< + T, + hash, + equal + > + > striped_set; + \endcode + + You should provide two different hash functions \p h1 and \p h2 - one for boost::unordered_set and the other for \p %StripedSet. + For the best result, \p h1 and \p h2 must be orthogonal i.e. h1(X) != h2(X) for any value \p X. +
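The advice above about two hash functions can be made concrete with a hedged sketch; the functor names \p bucket_hash and \p striped_hash below are hypothetical, and the adapter header to include is the one shown in the corresponding table cell:
\code
// Hedged sketch: one hash functor is used inside the unordered_set bucket,
// a different one is passed to StripedSet via cds::opt::hash to choose the bucket.
struct bucket_hash {           // h1 - used by the unordered_set bucket
    size_t operator()( int v ) const { return std::hash<int>()( v ); }
};
struct striped_hash {          // h2 - used by StripedSet, mixed differently from h1
    size_t operator()( int v ) const { return std::hash<int>()( v ) ^ 0x9e3779b9; }
};

typedef cds::container::StripedSet<
    std::unordered_set< int, bucket_hash >,
    cds::opt::hash< striped_hash >
> striped_unordered_set;
\endcode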
+ + You can use another container type as the set's bucket. + Suppose you have a container class \p MyBestContainer and you want to integrate it with \p %StripedSet as the bucket type. + There are two possibilities: + - either your \p MyBestContainer class natively supports the bucket interface; + in this case, you can use the default striped_set::adapt metafunction; + - or your \p MyBestContainer class does not support the bucket interface, which means you should develop a specialization of the + cds::container::striped_set::adapt metafunction providing the necessary interface. + + The striped_set::adapt< Container, Options... > metafunction has two template arguments: + - \p Container is the class that should be used as the bucket, for example, std::list< T >. + - \p Options pack is the options from the \p %StripedSet declaration. The \p adapt metafunction can use + any option from \p Options for its internal use. For example, a \p compare option can be passed to the \p adapt + metafunction via the \p Options argument of the \p %StripedSet declaration. + + See the striped_set::adapt metafunction for the description of the interface that the bucket container must provide + to be %StripedSet compatible. + + Copy policy + There are three predefined copy policies: + - \p cds::container::striped_set::copy_item - copies an item from the old bucket to the new one when resizing, using the copy ctor. It is the default policy for + compilers that do not support move semantics + - \p cds::container::striped_set::move_item - moves an item from the old bucket to the new one when resizing, using move semantics. It is the default policy for + compilers that support move semantics. If the compiler does not support move semantics, the move policy is the same as \p copy_item + - \p cds::container::striped_set::swap_item - copies an item from the old bucket to the new one when resizing, using \p std::swap. Not all containers support + this copy policy; see details in the table below. + + You can define your own copy policy specifically for your case; a hedged sketch of such a policy is shown after the table below. + Note that the right copy policy can significantly improve the performance of resizing. +
Container | Policies
+ - \p std::list + - \p std::vector + - \p boost::list + - \p boost::vector + - \p boost::stable_vector + \code + struct copy_item { + void operator()( std::list& list, std::list::iterator itInsert, std::list::iterator itWhat ) + { + list.insert( itInsert, *itWhat ); + } + } \endcode + + \code + // The type T stored in the list must be swappable + struct swap_item { + void operator()( std::list& list, std::list::iterator itInsert, std::list::iterator itWhat ) + { + std::swap( *list.insert( itInsert, T() ), *itWhat ); + } + } \endcode + + \code + struct move_item { + void operator()( std::list& list, std::list::iterator itInsert, std::list::iterator itWhat ) + { + list.insert( itInsert, std::move( *itWhat ) ); + } + } \endcode +
+ - \p std::set + - \p std::unordered_set + - \p stdext::hash_set (only for MS VC++ 2008) + \code + struct copy_item { + void operator()( std::set& set, std::set::iterator itWhat ) + { + set.insert( *itWhat ); + } + } \endcode + \p swap_item is not applicable (same as \p copy_item) + + \code + struct move_item { + void operator()( std::set& set, std::set::iterator itWhat ) + { + set.insert( std::move( *itWhat )); + } + } \endcode +
+ - \p boost::container::slist + \code + struct copy_item { + void operator()( bc::slist& list, bc::slist::iterator itInsert, bc::slist::iterator itWhat ) + { + list.insert_after( itInsert, *itWhat ); + } + } \endcode + + \code + // The type T stored in the list must be swappable + struct swap_item { + void operator()( bc::slist& list, bc::slist::iterator itInsert, bc::slist::iterator itWhat ) + { + std::swap( *list.insert_after( itInsert, T() ), *itWhat ); + } + } \endcode + + \code + struct move_item { + void operator()( bc::slist& list, bc::slist::iterator itInsert, bc::slist::iterator itWhat ) + { + list.insert_after( itInsert, std::move( *itWhat ) ); + } + } \endcode +
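As mentioned before the table, you can also provide your own copy policy. The following hedged sketch (the container name \p MyBestContainer is hypothetical) follows the same specialization pattern this patch uses for \p std::list in cds/container/striped_map/std_list.h:
\code
// Hedged sketch of a user-defined copy policy for a hypothetical sequential
// container MyBestContainer<T>, mirroring the copy_item_policy specializations above.
namespace cds { namespace container { namespace striped_set {

    template <typename T>
    struct copy_item_policy< MyBestContainer< T > >
    {
        typedef MyBestContainer< T > container_type;
        typedef typename container_type::iterator iterator;

        void operator()( container_type& c, iterator itInsert, iterator itWhat )
        {
            c.insert( itInsert, *itWhat );  // copy the item into the new bucket
        }
    };

}}} // namespace cds::container::striped_set
\endcode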
+ + Advanced functions + + libcds provides some advanced functions like \p erase_with, \p find_with, + that cannot be supported by all underlying containers. + The table below shows whether the underlying container supports those functions + (the sign "+" means "the container supports the function"): +
Container | \p find_with | \p erase_with
\p std::list | + | +
\p std::vector | + | +
\p std::set | - | -
\p std::unordered_set | - | -
\p stdext::hash_set (only for MS VC++ 2008) | - | -
\p boost::container::slist | + | +
\p boost::container::list | + | +
\p boost::container::vector | + | +
\p boost::container::stable_vector | + | +
\p boost::container::set | - | -
\p boost::container::flat_set | - | -
\p boost::unordered_set | - | -
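To tie the interface together, here is a hedged usage sketch assuming C++11 lambda support and an ordered \p std::list bucket as in the container table above; it is illustrative only:
\code
// Hedged usage sketch; see the member function documentation below for details.
typedef cds::container::StripedSet<
    std::list< int >,
    cds::opt::less< std::less< int > >
> int_striped_set;

int_striped_set s;
s.insert( 42 );                                      // key-only insert
s.insert( 43, []( int& item ) { /* init item */ } ); // insert with an init functor
s.ensure( 42, []( bool bNew, int& item, int key ) {
    // bNew == false here: key 42 is already in the set
} );
bool found = s.find( 42 );                           // true
s.erase( 43 );                                       // true, 43 is removed
\endcode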
+ */ + template + class StripedSet: protected intrusive::StripedSet + { + //@cond + typedef intrusive::StripedSet base_class; + //@endcond + public: + //@cond + typedef typename base_class::default_options default_options; + typedef typename base_class::options options; + //@endcond + + typedef Container underlying_container_type ; ///< original intrusive container type for the bucket + typedef typename base_class::bucket_type bucket_type ; ///< container type adapted for hash set + typedef typename bucket_type::value_type value_type ; ///< value type stored in the set + + typedef typename base_class::hash hash ; ///< Hash functor + typedef typename base_class::item_counter item_counter ; ///< Item counter + typedef typename base_class::resizing_policy resizing_policy ; ///< Resizing policy + typedef typename base_class::allocator_type allocator_type ; ///< allocator type specified in options. + typedef typename base_class::mutex_policy mutex_policy ; ///< Mutex policy + + protected: + //@cond + typedef typename base_class::scoped_cell_lock scoped_cell_lock; + typedef typename base_class::scoped_full_lock scoped_full_lock; + typedef typename base_class::scoped_resize_lock scoped_resize_lock; + //@endcond + + protected: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_insert_functor { + void operator()( value_type& ) + {} + }; + + struct empty_erase_functor { + void operator()( value_type const& ) + {} + }; + + struct empty_find_functor { + template + void operator()( value_type& item, Q& val ) + {} + }; +# endif + //@endcond + + public: + /// Default ctor. The initial capacity is 16. + StripedSet() + : base_class() + {} + + /// Ctor with initial capacity specified + StripedSet( + size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. + ) : base_class( nCapacity ) + {} + + /// Ctor with resizing policy (copy semantics) + /** + This constructor initializes m_ResizingPolicy member with copy of \p resizingPolicy parameter + */ + StripedSet( + size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. + ,resizing_policy const& resizingPolicy ///< Resizing policy + ) : base_class( nCapacity, resizingPolicy ) + {} + +#ifdef CDS_RVALUE_SUPPORT + /// Ctor with resizing policy (move semantics) + /** + This constructor initializes m_ResizingPolicy member moving \p resizingPolicy parameter + Move semantics is used. Available only for the compilers that supports C++11 rvalue reference. + */ + StripedSet( + size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. + ,resizing_policy&& resizingPolicy ///< Resizing policy + ) : base_class( nCapacity, std::forward(resizingPolicy) ) + {} +#endif + + /// Destructor destroys internal data + ~StripedSet() + {} + + public: + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain as minimum the complete key for the node. + The object of \ref value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + Returns \p true if \p val is inserted into the set, \p false otherwise. 
+ */ + template + bool insert( Q const& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return insert( val, []( value_type& ) {} ); +# else + return insert( val, empty_insert_functor() ); +# endif + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of new item . + + The functor signature is: + \code + void func( value_type& item ); + \endcode + where \p item is the item inserted. + + The type \p Q can differ from \ref value_type of items storing in the set. + Therefore, the \p value_type should be constructible from type \p Q. + + The user-defined functor is called only if the inserting is success. It can be passed by reference + using boost::ref + */ + template + bool insert( Q const& val, Func f ) + { + bool bOk; + bool bResize; + size_t nHash = base_class::hashing( val ); + bucket_type * pBucket; + { + scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); + pBucket = base_class::bucket( nHash ); + bOk = pBucket->insert( val, f ); + bResize = bOk && base_class::m_ResizingPolicy( ++base_class::m_ItemCounter, *this, *pBucket ); + } + + if ( bResize ) + base_class::resize(); + return bOk; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Inserts data of type \p %value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + bool bOk; + bool bResize; + size_t nHash = base_class::hashing( value_type( std::forward(args)...)); + bucket_type * pBucket; + { + scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); + pBucket = base_class::bucket( nHash ); + + bOk = pBucket->emplace( std::forward(args)...); + bResize = bOk && base_class::m_ResizingPolicy( ++base_class::m_ItemCounter, *this, *pBucket ); + } + + if ( bResize ) + base_class::resize(); + return bOk; + } +# endif + + /// Ensures that the \p val exists in the set + /** + The operation performs inserting or changing data. + + If the \p val key not found in the set, then the new item created from \p val + is inserted into the set. Otherwise, the functor \p func is called with the item found. + The functor \p Func should be a function with signature: + \code + void func( bool bNew, value_type& item, const Q& val ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p val passed into the \p ensure function + + The functor can change non-key fields of the \p item. + + You can pass \p func argument by value or by reference using boost::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p val key + already exists. 
+ */ + template + std::pair ensure( Q const& val, Func func ) + { + std::pair result; + bool bResize; + size_t nHash = base_class::hashing( val ); + bucket_type * pBucket; + { + scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); + pBucket = base_class::bucket( nHash ); + + result = pBucket->ensure( val, func ); + bResize = result.first && result.second && base_class::m_ResizingPolicy( ++base_class::m_ItemCounter, *this, *pBucket ); + } + + if ( bResize ) + base_class::resize(); + return result; + } + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_StripedSet_erase + + The set item comparator should be able to compare the type \p value_type and the type \p Q. + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase( key, [](value_type const&) {} ); +# else + return erase( key, empty_erase_functor() ); +# endif + } + +#ifdef CDS_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS_SUPPORT + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_StripedSet_erase "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + + @note This function is enabled if the compiler supports C++11 + default template arguments for function template and the underlying container + supports \p %erase_with feature. + */ + template < typename Q, typename Less + ,typename Bucket = bucket_type, typename = typename std::enable_if< Bucket::has_erase_with >::type > + bool erase_with( Q const& key, Less pred ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_with( key, pred, [](value_type const&) {} ); +# else + return erase_with( key, pred, empty_erase_functor() ); +# endif + } +#endif + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_StripedSet_erase_func + + The function searches an item with key \p key, calls \p f functor with item found + and deletes it. If \p key is not found, the functor is not called. + + The functor \p Func interface is: + \code + struct functor { + void operator()(value_type const& val); + }; + \endcode + The functor can be passed by value or by reference using boost:ref + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { + bool bOk; + size_t nHash = base_class::hashing( key ); + { + scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); + bucket_type * pBucket = base_class::bucket( nHash ); + + bOk = pBucket->erase( key, f ); + } + + if ( bOk ) + --base_class::m_ItemCounter; + return bOk; + } + +#ifdef CDS_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS_SUPPORT + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_StripedSet_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + + @note This function is enabled if the compiler supports C++11 + default template arguments for function template and the underlying container + supports \p %erase_with feature. 
+ */ + template < typename Q, typename Less, typename Func + , typename Bucket = bucket_type, typename = typename std::enable_if< Bucket::has_erase_with >::type > + bool erase_with( Q const& key, Less pred, Func f ) + { + bool bOk; + size_t nHash = base_class::hashing( key ); + { + scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); + bucket_type * pBucket = base_class::bucket( nHash ); + + bOk = pBucket->erase( key, pred, f ); + } + + if ( bOk ) + --base_class::m_ItemCounter; + return bOk; + } +#endif + + /// Find the key \p val + /** \anchor cds_nonintrusive_StripedSet_find_func + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + The type \p Q can differ from \ref value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return base_class::find( val, f ); + } + +#ifdef CDS_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS_SUPPORT + /// Find the key \p val using \p pred predicate + /** + The function is an analog of \ref cds_nonintrusive_StripedSet_find_func "find(Q&, Func)" + but \p pred is used for key comparing + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + + @note This function is enabled if the compiler supports C++11 + default template arguments for function template and the underlying container + supports \p %find_with feature. + */ + template ::type > + bool find_with( Q& val, Less pred, Func f ) + { + return base_class::find_with( val, pred, f ); + } +#endif + + /// Find the key \p val + /** \anchor cds_nonintrusive_StripedSet_find_cfunc + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. + + The type \p Q can differ from \ref value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return base_class::find( val, f ); + } + +#ifdef CDS_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS_SUPPORT + /// Find the key \p val using \p pred predicate + /** + The function is an analog of \ref cds_nonintrusive_StripedSet_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. 
+ + @note This function is enabled if the compiler supports C++11 + default template arguments for function template and the underlying container + supports \p %find_with feature. + */ + template ::type > + bool find_with( Q const& val, Less pred, Func f ) + { + return base_class::find_with( val, pred, f ); + } +#endif + + /// Find the key \p val + /** \anchor cds_nonintrusive_StripedSet_find_val + + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \ref value_type. + */ + template + bool find( Q const& val ) + { + return base_class::find( val ); + } + +#ifdef CDS_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS_SUPPORT + /// Find the key \p val using \p pred predicate + /** + The function is an analog of \ref cds_nonintrusive_StripedSet_find_val "find(Q const&)" + but \p pred is used for key comparing + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + + @note This function is enabled if the compiler supports C++11 + default template arguments for function template and the underlying container + supports \p %find_with feature. + */ + template ::type > + bool find_with( Q const& val, Less pred ) + { + return base_class::find_with( val, pred ); + } +#endif + + /// Clears the set + /** + The function erases all items from the set. + */ + void clear() + { + return base_class::clear(); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + size_t size() const + { + return base_class::size(); + } + + /// Returns the size of hash table + /** + The hash table size is non-constant and can be increased via resizing. 
+ */ + size_t bucket_count() const + { + return base_class::bucket_count(); + } + + /// Returns lock array size + size_t lock_count() const + { + return base_class::lock_count(); + } + + /// Returns resizing policy object + resizing_policy& get_resizing_policy() + { + return base_class::get_resizing_policy(); + } + + /// Returns resizing policy (const version) + resizing_policy const& get_resizing_policy() const + { + return base_class::get_resizing_policy(); + } + }; + +}} // namespace cds::container + + +#endif // #ifndef __CDS_CONTAINER_STRIPED_SET_H diff --git a/cds/container/striped_set/adapter.h b/cds/container/striped_set/adapter.h new file mode 100644 index 00000000..267305b3 --- /dev/null +++ b/cds/container/striped_set/adapter.h @@ -0,0 +1,491 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_SET_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_SET_ADAPTER_H + +#include +#include + +namespace cds { namespace container { + /// Striped hash set related definitions + namespace striped_set { + + //@cond + struct copy_item ; // copy_item_policy tag + template + struct copy_item_policy; + + struct swap_item ; // swap_item_policy tag + template + struct swap_item_policy; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + struct move_item ; // move_item_policy tag + template + struct move_item_policy; +#else + typedef copy_item move_item ; // if move semantics is not supported, move_item is synonym for copy_item +#endif + //@endcond + +#ifdef CDS_DOXYGEN_INVOKED + /// Default adapter for hash set + /** + By default, the metafunction does not make any transformation for container type \p Container. + \p Container should provide interface suitable for the hash set. + + The \p Options template argument contains a list of options + that has been passed to cds::container::StripedSet. + + Bucket interface + + The result of metafunction is a container (a bucket) that should support the following interface: + + Public typedefs that the bucket should provide: + - \p value_type - the type of the item in the bucket + - \p iterator - bucket's item iterator + - \p const_iterator - bucket's item constant iterator + - \p default_resizing_policy - defalt resizing policy preferable for the container. + By default, the library defines striped_set::load_factor_resizing<4> for sequential containers like + std::list, std::vector, and striped_set::no_resizing for ordered container like std::set, + std::unordered_set. + + Insert value \p val of type \p Q + \code template bool insert( const Q& val, Func f ) ; \endcode + The function allows to split creating of new item into two part: + - create item with key only from \p val + - try to insert new item into the container + - if inserting is success, calls \p f functor to initialize value-field of the new item. + + The functor signature is: + \code + void func( value_type& item ); + \endcode + where \p item is the item inserted. + + The type \p Q can differ from \ref value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q and constructible from type \p Q, + + The user-defined functor is called only if the inserting is success. It can be passed by reference + using boost::ref +
+ + Inserts data of type \ref value_type constructed with std::forward(args)... + \code template bool emplace( Args&&... args ) ; \endcode + Returns \p true if inserting successful, \p false otherwise. + + This function should be available only for compiler that supports + variadic template and move semantics +
+ + Ensures that the \p item exists in the container + \code template std::pair ensure( const Q& val, Func func ) \endcode + The operation performs inserting or changing data. + + If the \p val key not found in the container, then the new item created from \p val + is inserted. Otherwise, the functor \p func is called with the item found. + The \p Func functor has interface: + \code + void func( bool bNew, value_type& item, const Q& val ); + \endcode + or like a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + + where arguments are: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - container's item + - \p val - argument \p val passed into the \p ensure function + + The functor can change non-key fields of the \p item. + + The type \p Q can differ from \ref value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q and constructible from type \p Q, + + You can pass \p func argument by reference using boost::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p val key + already exists. +
+ + + Delete \p key + \code template bool erase( const Q& key, Func f ) \endcode + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface is: + \code + struct extractor { + void operator()(value_type const& val); + }; + \endcode + The functor can be passed by reference using boost:ref + + The type \p Q can differ from \ref value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q. + + Return \p true if key is found and deleted, \p false otherwise +
+ + + Find the key \p val + \code template bool find( Q& val, Func f ) \endcode + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + The type \p Q can differ from \ref value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q. + + The function returns \p true if \p val is found, \p false otherwise. +
+ + Clears the container + \code void clear() \endcode +
+ + Get size of bucket + \code size_t size() const \endcode + This function can be required by some resizing policy +
+ + Move item when resizing + \code void move_item( adapted_container& from, iterator it ) \endcode + This helper function is invented for the set resizing when the item + pointed by \p it iterator is copied from an old bucket \p from to a new bucket + pointed by \p this. +
+ + */ + template < typename Container, CDS_DECL_OPTIONS > + class adapt + { + public: + typedef Container type ; ///< adapted container type + typedef typename type::value_type value_type ; ///< value type stored in the container + }; +#else // CDS_DOXYGEN_INVOKED + using cds::intrusive::striped_set::adapt; +#endif + + //@cond + using cds::intrusive::striped_set::adapted_sequential_container; + using cds::intrusive::striped_set::adapted_container; + + using cds::intrusive::striped_set::load_factor_resizing; + using cds::intrusive::striped_set::single_bucket_size_threshold; + using cds::intrusive::striped_set::no_resizing; + + using cds::intrusive::striped_set::striping; + using cds::intrusive::striped_set::refinable; + //@endcond + + //@cond + namespace details { + + template + struct boost_set_copy_policies + { + struct copy_item_policy + { + typedef Set set_type; + typedef typename set_type::iterator iterator; + + void operator()( set_type& set, iterator itWhat ) + { + set.insert( *itWhat ); + } + }; + + typedef copy_item_policy swap_item_policy; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + struct move_item_policy + { + typedef Set set_type; + typedef typename set_type::iterator iterator; + + void operator()( set_type& set, iterator itWhat ) + { + set.insert( std::move( *itWhat ) ); + } + }; +#endif + }; + + template + class boost_set_adapter: public striped_set::adapted_container + { + public: + typedef Set container_type; + + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = false; + static bool const has_erase_with = false; + + private: + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , CDS_OPTIONS + >::type + >::copy_policy + , cds::container::striped_set::copy_item, copy_item_policy + , cds::container::striped_set::swap_item, swap_item_policy +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + , cds::container::striped_set::move_item, move_item_policy +#endif + >::type copy_item; + + private: + container_type m_Set; + + public: + boost_set_adapter() + {} + + container_type& base_container() + { + return m_Set; + } + + template + bool insert( const Q& val, Func f ) + { + std::pair res = m_Set.insert( value_type(val) ); + if ( res.second ) + cds::unref(f)( const_cast(*res.first) ); + return res.second; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace( Args&&... args ) + { + std::pair res = m_Set.emplace( std::forward(args)... 
); + return res.second; + } +# endif + + template + std::pair ensure( const Q& val, Func func ) + { + std::pair res = m_Set.insert( value_type(val) ); + cds::unref(func)( res.second, const_cast(*res.first), val ); + return std::make_pair( true, res.second ); + } + + template + bool erase( const Q& key, Func f ) + { + const_iterator it = m_Set.find( value_type(key) ); + if ( it == m_Set.end() ) + return false; + cds::unref(f)( const_cast(*it) ); + m_Set.erase( it ); + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = m_Set.find( value_type(val) ); + if ( it == m_Set.end() ) + return false; + cds::unref(f)( const_cast(*it), val ); + return true; + } + + void clear() + { + m_Set.clear(); + } + + iterator begin() { return m_Set.begin(); } + const_iterator begin() const { return m_Set.begin(); } + iterator end() { return m_Set.end(); } + const_iterator end() const { return m_Set.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + assert( m_Set.find( *itWhat ) == m_Set.end() ); + copy_item()( m_Set, itWhat ); + } + + size_t size() const + { + return m_Set.size(); + } + }; + + template + struct boost_map_copy_policies { + struct copy_item_policy { + typedef Map map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + map.insert( *itWhat ); + } + }; + + struct swap_item_policy { + typedef Map map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + std::pair< iterator, bool > ret = map.insert( pair_type( itWhat->first, typename pair_type::second_type() )); + assert( ret.second ) ; // successful insertion + std::swap( ret.first->second, itWhat->second ); + } + }; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + struct move_item_policy { + typedef Map map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + map.insert( std::move( *itWhat ) ); + } + }; +#endif + }; + + template + class boost_map_adapter: public striped_set::adapted_container + { + public: + typedef Map container_type; + + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::key_type key_type; + typedef typename container_type::mapped_type mapped_type; + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = false; + static bool const has_erase_with = false; + + private: + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , CDS_OPTIONS + >::type + >::copy_policy + , cds::container::striped_set::copy_item, copy_item_policy + , cds::container::striped_set::swap_item, swap_item_policy +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + , cds::container::striped_set::move_item, move_item_policy +#endif + >::type copy_item; + + private: + container_type m_Map; + + public: + template + bool insert( const Q& key, Func f ) + { + std::pair res = m_Map.insert( value_type( key, mapped_type() ) ); + if ( res.second ) + cds::unref(f)( *res.first ); + return res.second; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace( Q&& key, 
Args&&... args ) + { + std::pair res = m_Map.emplace( std::forward(key), std::move( mapped_type( std::forward(args)...))); + return res.second; + } +# endif + + template + std::pair ensure( const Q& val, Func func ) + { + std::pair res = m_Map.insert( value_type( val, mapped_type() )); + cds::unref(func)( res.second, *res.first ); + return std::make_pair( true, res.second ); + } + + template + bool erase( const Q& key, Func f ) + { + iterator it = m_Map.find( key_type(key) ); + if ( it == m_Map.end() ) + return false; + cds::unref(f)( *it ); + m_Map.erase( it ); + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = m_Map.find( key_type(val) ); + if ( it == m_Map.end() ) + return false; + cds::unref(f)( *it, val ); + return true; + } + + void clear() + { + m_Map.clear(); + } + + iterator begin() { return m_Map.begin(); } + const_iterator begin() const { return m_Map.begin(); } + iterator end() { return m_Map.end(); } + const_iterator end() const { return m_Map.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + assert( m_Map.find( itWhat->first ) == m_Map.end() ); + copy_item()( m_Map, itWhat ); + } + + size_t size() const + { + return m_Map.size(); + } + }; + + } // namespace details + //@endcond + + } // namespace striped_set +}} // namespace cds::container + + +#endif // #ifndef __CDS_CONTAINER_STRIPED_SET_ADAPTER_H diff --git a/cds/container/striped_set/boost_flat_set.h b/cds/container/striped_set/boost_flat_set.h new file mode 100644 index 00000000..d1d88403 --- /dev/null +++ b/cds/container/striped_set/boost_flat_set.h @@ -0,0 +1,57 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_SET_BOOST_FLAT_SET_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_SET_BOOST_FLAT_SET_ADAPTER_H + +#include +#if BOOST_VERSION < 104800 +# error "For boost::container::flat_set you must use boost 1.48 or above" +#endif + +#include +#include + +//#if CDS_COMPILER == CDS_COMPILER_MSVC && CDS_COMPILER_VERSION >= 1700 +//# error "boost::container::flat_set is not compatible with MS VC++ 11" +//#endif + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for boost::container::flat_set + template + struct copy_item_policy< boost::container::flat_set< T, Traits, Alloc > > + : public details::boost_set_copy_policies< boost::container::flat_set< T, Traits, Alloc > >::copy_item_policy + {}; + + // Swap policy is not defined for boost::container::flat_set + template + struct swap_item_policy< boost::container::flat_set< T, Traits, Alloc > > + : public details::boost_set_copy_policies< boost::container::flat_set< T, Traits, Alloc > >::swap_item_policy + {}; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for boost::container::flat_set + template + struct move_item_policy< boost::container::flat_set< T, Traits, Alloc > > + : public details::boost_set_copy_policies< boost::container::flat_set< T, Traits, Alloc > >::move_item_policy + {}; +#endif + + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + template + class adapt< boost::container::flat_set, CDS_OPTIONS > + { + public: + typedef boost::container::flat_set container_type ; ///< underlying container type + typedef cds::container::striped_set::details::boost_set_adapter< container_type, CDS_OPTIONS > type; + }; +}}} + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_SET_BOOST_FLAT_SET_ADAPTER_H diff --git a/cds/container/striped_set/boost_list.h 
b/cds/container/striped_set/boost_list.h new file mode 100644 index 00000000..ffc661af --- /dev/null +++ b/cds/container/striped_set/boost_list.h @@ -0,0 +1,266 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_SET_BOOST_LIST_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_SET_BOOST_LIST_ADAPTER_H + +#include +#if BOOST_VERSION < 104800 +# error "For boost::container::list you must use boost 1.48 or above" +#endif + +#include +#include +#include +#include // std::lower_bound + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for boost::container::list + template + struct copy_item_policy< boost::container::list< T, Alloc > > + { + typedef boost::container::list< T, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + itInsert = list.insert( itInsert, *itWhat ); + } + }; + + // Swap policy for boost::container::list + template + struct swap_item_policy< boost::container::list< T, Alloc > > + { + typedef boost::container::list< T, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + typename list_type::value_type newVal; + itInsert = list.insert( itInsert, newVal ); + std::swap( *itWhat, *itInsert ); + } + }; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for boost::container::list + template + struct move_item_policy< boost::container::list< T, Alloc > > + { + typedef boost::container::list< T, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + list.insert( itInsert, std::move( *itWhat ) ); + } + }; +#endif + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// boost::container::list adapter for hash set bucket + template + class adapt< boost::container::list, CDS_OPTIONS > + { + public: + typedef boost::container::list container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = true; + static bool const has_erase_with = true; + + private: + //@cond + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, CDS_OPTIONS >::type key_comparator; + + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , CDS_OPTIONS + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy +#endif + >::type copy_item; + + struct find_predicate + { + bool operator()( value_type const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( Q const& i1, value_type const& i2) const + { + return 
key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( value_type const& i1, Q const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + }; + //@endcond + + private: + //@cond + container_type m_List; + //@endcond + + public: + adapted_container() + {} + + template + bool insert( Q const& val, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate() ); + if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { + value_type newItem( val ); + it = m_List.insert( it, newItem ); + cds::unref( f )( *it ); + + return true; + } + + // key already exists + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace( Args&&... args ) + { + value_type val( std::forward(args)... ); + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate() ); + if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { + m_List.emplace( it, std::move( val ) ); + return true; + } + return false; + } +# endif + + template + std::pair ensure( Q const& val, Func func ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate() ); + if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { + // insert new + value_type newItem( val ); + it = m_List.insert( it, newItem ); + cds::unref( func )( true, *it, val ); + return std::make_pair( true, true ); + } + else { + // already exists + cds::unref( func )( false, *it, val ); + return std::make_pair( true, false ); + } + } + + template + bool erase( Q const& key, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate() ); + if ( it == m_List.end() || key_comparator()( key, *it ) != 0 ) + return false; + + // key exists + cds::unref( f )( *it ); + m_List.erase( it ); + + return true; + } + + template + bool erase( Q const& key, Less pred, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, pred ); + if ( it == m_List.end() || pred( key, *it ) || pred( *it, key ) ) + return false; + + // key exists + cds::unref( f )( *it ); + m_List.erase( it ); + + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate() ); + if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) + return false; + + // key exists + cds::unref( f )( *it, val ); + return true; + } + + template + bool find( Q& val, Less pred, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, pred ); + if ( it == m_List.end() || pred( val, *it ) || pred( *it, val ) ) + return false; + + // key exists + cds::unref( f )( *it, val ); + return true; + } + + /// Clears the container + void clear() + { + m_List.clear(); + } + + iterator begin() { return m_List.begin(); } + const_iterator begin() const { return m_List.begin(); } + iterator end() { return m_List.end(); } + const_iterator end() const { return m_List.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), *itWhat, find_predicate() ); + assert( it == m_List.end() || key_comparator()( *itWhat, *it ) != 0 ); + + copy_item()( m_List, it, itWhat ); + } + + size_t size() const + { + return m_List.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrsive::striped_set +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_SET_BOOST_LIST_ADAPTER_H diff --git 
a/cds/container/striped_set/boost_set.h b/cds/container/striped_set/boost_set.h new file mode 100644 index 00000000..e26bf31f --- /dev/null +++ b/cds/container/striped_set/boost_set.h @@ -0,0 +1,56 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_SET_BOOST_SET_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_SET_BOOST_SET_ADAPTER_H + +#include +#if BOOST_VERSION < 104800 +# error "For boost::container::set you must use boost 1.48 or above" +#endif + +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for boost::container::set + template + struct copy_item_policy< boost::container::set< T, Traits, Alloc > > + : public details::boost_set_copy_policies< boost::container::set< T, Traits, Alloc > >::copy_item_policy + {}; + // Copy policy for boost::container::set + template + struct swap_item_policy< boost::container::set< T, Traits, Alloc > > + : public details::boost_set_copy_policies< boost::container::set< T, Traits, Alloc > >::swap_item_policy + {}; + + // Swap policy is not defined for boost::container::set + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for boost::container::set + template + struct move_item_policy< boost::container::set< T, Traits, Alloc > > + : public details::boost_set_copy_policies< boost::container::set< T, Traits, Alloc > >::move_item_policy + {}; +#endif + + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + /// boost::container::flat_set adapter for hash set bucket + template + class adapt< boost::container::set, CDS_OPTIONS > + { + public: + typedef boost::container::set container_type ; ///< underlying container type + typedef cds::container::striped_set::details::boost_set_adapter< container_type, CDS_OPTIONS > type; + }; +}}} // namespace cds::intrusive::striped_set + + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_SET_BOOST_SET_ADAPTER_H diff --git a/cds/container/striped_set/boost_slist.h b/cds/container/striped_set/boost_slist.h new file mode 100644 index 00000000..fb8d65fc --- /dev/null +++ b/cds/container/striped_set/boost_slist.h @@ -0,0 +1,277 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_SET_BOOST_SLIST_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_SET_BOOST_SLIST_ADAPTER_H + +#include +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for boost::container::slist + template + struct copy_item_policy< boost::container::slist< T, Alloc > > + { + typedef boost::container::slist< T, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + list.insert_after( itInsert, *itWhat ); + } + }; + + // Swap policy for boost::container::slist + template + struct swap_item_policy< boost::container::slist< T, Alloc > > + { + typedef boost::container::slist< T, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + T newVal; + itInsert = list.insert_after( itInsert, newVal ); + std::swap( *itInsert, *itWhat ); + } + }; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for boost::container::slist + template + struct move_item_policy< boost::container::slist< T, Alloc > > + { + typedef boost::container::slist< T, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + 
list.insert_after( itInsert, std::move( *itWhat ) ); + } + }; +#endif + + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// boost::container::slist adapter for hash set bucket + template + class adapt< boost::container::slist, CDS_OPTIONS > + { + public: + typedef boost::container::slist container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = true; + static bool const has_erase_with = true; + + private: + //@cond + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, CDS_OPTIONS >::type key_comparator; + + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , CDS_OPTIONS + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy +#endif + >::type copy_item; + + template + std::pair< iterator, bool > find_prev_item( Q const& key ) + { + iterator itPrev = m_List.before_begin(); + iterator itEnd = m_List.end(); + for ( iterator it = m_List.begin(); it != itEnd; ++it ) { + int nCmp = key_comparator()( key, *it ); + if ( nCmp < 0 ) + itPrev = it; + else if ( nCmp > 0 ) + break; + else + return std::make_pair( itPrev, true ); + } + return std::make_pair( itPrev, false ); + } + + template + std::pair< iterator, bool > find_prev_item( Q const& key, Less pred ) + { + iterator itPrev = m_List.before_begin(); + iterator itEnd = m_List.end(); + for ( iterator it = m_List.begin(); it != itEnd; ++it ) { + if ( pred( key, *it )) + itPrev = it; + else if ( pred( *it, key ) ) + break; + else + return std::make_pair( itPrev, true ); + } + return std::make_pair( itPrev, false ); + } + + //@endcond + + private: + //@cond + container_type m_List; + //@endcond + + public: + adapted_container() + {} + + template + bool insert( const Q& val, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( val ); + if ( !pos.second ) { + value_type newItem( val ); + pos.first = m_List.insert_after( pos.first, newItem ); + cds::unref( f )( *pos.first ); + return true; + } + + // key already exists + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace( Args&&... args ) + { + value_type val( std::forward(args)... 
); + std::pair< iterator, bool > pos = find_prev_item( val ); + if ( !pos.second ) { + m_List.emplace_after( pos.first, std::move( val ) ); + return true; + } + return false; + } +# endif + + template + std::pair ensure( const Q& val, Func func ) + { + std::pair< iterator, bool > pos = find_prev_item( val ); + if ( !pos.second ) { + // insert new + value_type newItem( val ); + pos.first = m_List.insert_after( pos.first, newItem ); + cds::unref( func )( true, *pos.first, val ); + return std::make_pair( true, true ); + } + else { + // already exists + cds::unref( func )( false, *(++pos.first), val ); + return std::make_pair( true, false ); + } + } + + template + bool erase( Q const& key, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( key ); + if ( !pos.second ) + return false; + + // key exists + iterator it = pos.first; + cds::unref( f )( *(++it) ); + m_List.erase_after( pos.first ); + + return true; + } + + template + bool erase( Q const& key, Less pred, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( key, pred ); + if ( !pos.second ) + return false; + + // key exists + iterator it = pos.first; + cds::unref( f )( *(++it) ); + m_List.erase_after( pos.first ); + + return true; + } + + template + bool find( Q& val, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( val ); + if ( !pos.second ) + return false; + + // key exists + cds::unref( f )( *(++pos.first), val ); + return true; + } + + template + bool find( Q& val, Less pred, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( val, pred ); + if ( !pos.second ) + return false; + + // key exists + cds::unref( f )( *(++pos.first), val ); + return true; + } + + void clear() + { + m_List.clear(); + } + + iterator begin() { return m_List.begin(); } + const_iterator begin() const { return m_List.begin(); } + iterator end() { return m_List.end(); } + const_iterator end() const { return m_List.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + std::pair< iterator, bool > pos = find_prev_item( *itWhat ); + assert( !pos.second ); + + copy_item()( m_List, pos.first, itWhat ); + } + + size_t size() const + { + return m_List.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrusive::striped_set + + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_SET_BOOST_SLIST_ADAPTER_H diff --git a/cds/container/striped_set/boost_stable_vector.h b/cds/container/striped_set/boost_stable_vector.h new file mode 100644 index 00000000..d83a55ab --- /dev/null +++ b/cds/container/striped_set/boost_stable_vector.h @@ -0,0 +1,354 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_SET_BOOST_STABLE_VECTOR_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_SET_BOOST_STABLE_VECTOR_ADAPTER_H + +#include +#if BOOST_VERSION < 104800 +# error "For boost::container::stable_vector you must use boost 1.48 or above" +#endif + +#include +#include +#include +#include // std::lower_bound +#include // std::pair + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for boost::container::stable_vector + template + struct copy_item_policy< boost::container::stable_vector< T, Alloc > > + { + typedef boost::container::stable_vector< T, Alloc > vector_type; + typedef typename vector_type::iterator iterator; + + void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) + { + vec.insert( itInsert, *itWhat ); + } + }; + + // Swap policy for 
boost::container::stable_vector + template + struct swap_item_policy< boost::container::stable_vector< T, Alloc > > + { + typedef boost::container::stable_vector< T, Alloc > vector_type; + typedef typename vector_type::iterator iterator; + + void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) + { + typename vector_type::value_type newVal; + itInsert = vec.insert( itInsert, newVal ); + std::swap( *itInsert, *itWhat ); + } + }; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for boost::container::stable_vector + template + struct move_item_policy< boost::container::stable_vector< T, Alloc > > + { + typedef boost::container::stable_vector< T, Alloc > vector_type; + typedef typename vector_type::iterator iterator; + + void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) + { + vec.insert( itInsert, std::move( *itWhat )); + } + }; +#endif + + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + /// boost::container::stable_vector adapter for hash set bucket + template + class adapt< boost::container::stable_vector, CDS_OPTIONS > + { + public: + typedef boost::container::stable_vector container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = true; + static bool const has_erase_with = true; + + private: + //@cond + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, CDS_OPTIONS >::type key_comparator; + + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , CDS_OPTIONS + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy +#endif + >::type copy_item; + + struct find_predicate + { + bool operator()( value_type const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( Q const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( value_type const& i1, Q const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + }; + //@endcond + + private: + //@cond + container_type m_Vector; + //@endcond + + public: + + /// Insert value \p val of type \p Q into the container + /** + The function allows to split creating of new item into two part: + - create item with key only from \p val + - try to insert new item into the container + - if inserting is success, calls \p f functor to initialize value-field of the new item. + + The functor signature is: + \code + void func( value_type& item ); + \endcode + where \p item is the item inserted. + + The type \p Q may differ from \ref value_type of items storing in the container. 
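+ For example, a minimal functor that initializes a non-key field of the newly inserted item
+ (here \p nPayload is an assumed, purely illustrative member of \p value_type) could be:
+ \code
+ struct insert_functor {
+     void operator()( value_type& item )
+     {
+         item.nPayload = 0;   // nPayload is a hypothetical field, for illustration only
+     }
+ };
+ \endcode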
+ Therefore, the \p value_type should be comparable with type \p Q and constructible from type \p Q, + + The user-defined functor is called only if the inserting is success. It may be passed by reference + using boost::ref + */ + template + bool insert( const Q& val, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate() ); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { + value_type newItem( val ); + it = m_Vector.insert( it, newItem ); + cds::unref( f )( *it ); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace( Args&&... args ) + { + value_type val( std::forward(args)... ); + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate() ); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { + it = m_Vector.emplace( it, std::move( val ) ); + return true; + } + return false; + } +# endif + + /// Ensures that the \p item exists in the container + /** + The operation performs inserting or changing data. + + If the \p val key not found in the container, then the new item created from \p val + is inserted. Otherwise, the functor \p func is called with the item found. + The \p Func functor has interface: + \code + void func( bool bNew, value_type& item, const Q& val ); + \endcode + or like a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + + where arguments are: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - container's item + - \p val - argument \p val passed into the \p ensure function + + The functor may change non-key fields of the \p item. + + The type \p Q may differ from \ref value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q and constructible from type \p Q, + + You may pass \p func argument by reference using boost::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p val key + already exists. + */ + template + std::pair ensure( const Q& val, Func func ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate() ); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { + // insert new + value_type newItem( val ); + it = m_Vector.insert( it, newItem ); + cds::unref( func )( true, *it, val ); + return std::make_pair( true, true ); + } + else { + // already exists + cds::unref( func )( false, *it, val ); + return std::make_pair( true, false ); + } + } + + /// Delete \p key + /** + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface is: + \code + struct extractor { + void operator()(value_type const& val); + }; + \endcode + The functor may be passed by reference using boost:ref + + The type \p Q may differ from \ref value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q. 
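+ For example, a stateful extractor that keeps a copy of the deleted item (an illustrative
+ sketch only; \p m_copy is an assumed member, and the functor would be passed via boost::ref
+ so that the copy survives the call) might be:
+ \code
+ struct value_saver {
+     value_type m_copy;
+     void operator()( value_type const& val )
+     {
+         m_copy = val;   // remember the item being erased
+     }
+ };
+ \endcode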
+ + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( const Q& key, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), key, find_predicate() ); + if ( it == m_Vector.end() || key_comparator()( key, *it ) != 0 ) + return false; + + // key exists + cds::unref( f )( *it ); + m_Vector.erase( it ); + return true; + } + + template + bool erase( const Q& key, Less pred, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), key, pred ); + if ( it == m_Vector.end() || pred( key, *it ) || pred( *it, key ) ) + return false; + + // key exists + cds::unref( f )( *it ); + m_Vector.erase( it ); + return true; + } + + /// Find the key \p val + /** + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + The type \p Q may differ from \ref value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate() ); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) + return false; + + // key exists + cds::unref( f )( *it, val ); + return true; + } + + template + bool find( Q& val, Less pred, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, pred ); + if ( it == m_Vector.end() || pred( val, *it ) || pred( *it, val ) ) + return false; + + // key exists + cds::unref( f )( *it, val ); + return true; + } + + /// Clears the container + void clear() + { + m_Vector.clear(); + } + + iterator begin() { return m_Vector.begin(); } + const_iterator begin() const { return m_Vector.begin(); } + iterator end() { return m_Vector.end(); } + const_iterator end() const { return m_Vector.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), *itWhat, find_predicate() ); + assert( it == m_Vector.end() || key_comparator()( *itWhat, *it ) != 0 ); + + copy_item()( m_Vector, it, itWhat ); + } + + size_t size() const + { + return m_Vector.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrusive::striped_set + + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_SET_BOOST_STABLE_VECTOR_ADAPTER_H diff --git a/cds/container/striped_set/boost_unordered_set.h b/cds/container/striped_set/boost_unordered_set.h new file mode 100644 index 00000000..5c9b98bc --- /dev/null +++ b/cds/container/striped_set/boost_unordered_set.h @@ -0,0 +1,47 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_SET_BOOST_UNORDERED_SET_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_SET_BOOST_UNORDERED_SET_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for boost::unordered_set + template + struct 
copy_item_policy< boost::unordered_set< T, Traits, Alloc > > + : public details::boost_set_copy_policies< boost::unordered_set< T, Traits, Alloc > >::copy_item_policy + {}; + + template + struct swap_item_policy< boost::unordered_set< T, Traits, Alloc > > + : public details::boost_set_copy_policies< boost::unordered_set< T, Traits, Alloc > >::swap_item_policy + {}; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for boost::unordered_set + template + struct move_item_policy< boost::unordered_set< T, Traits, Alloc > > + : public details::boost_set_copy_policies< boost::unordered_set< T, Traits, Alloc > >::move_item_policy + {}; +#endif + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + /// boost::unordered_set adapter for hash set bucket + template + class adapt< boost::unordered_set, CDS_OPTIONS > + { + public: + typedef boost::unordered_set container_type ; ///< underlying container type + typedef cds::container::striped_set::details::boost_set_adapter< container_type, CDS_OPTIONS > type; + }; +}}} // namespace cds::intrusive::striped_set + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_SET_BOOST_UNORDERED_SET_ADAPTER_H diff --git a/cds/container/striped_set/boost_vector.h b/cds/container/striped_set/boost_vector.h new file mode 100644 index 00000000..076bfd80 --- /dev/null +++ b/cds/container/striped_set/boost_vector.h @@ -0,0 +1,261 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_SET_BOOST_VECTOR_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_SET_BOOST_VECTOR_ADAPTER_H + +#include +#if BOOST_VERSION < 104800 +# error "For boost::container::vector you must use boost 1.48 or above" +#endif + +#include // lower_bound +#include +#include +#include // std::lower_bound +#include // std::pair + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for boost::container::vector + template + struct copy_item_policy< boost::container::vector< T, Alloc > > + { + typedef boost::container::vector< T, Alloc > vector_type; + typedef typename vector_type::iterator iterator; + + void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) + { + vec.insert( itInsert, *itWhat ); + } + }; + + // Swap policy for boost::container::vector + template + struct swap_item_policy< boost::container::vector< T, Alloc > > + { + typedef boost::container::vector< T, Alloc > vector_type; + typedef typename vector_type::iterator iterator; + + void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) + { + typename vector_type::value_type newVal; + itInsert = vec.insert( itInsert, newVal ); + std::swap( *itInsert, *itWhat ); + } + }; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for boost::container::vector + template + struct move_item_policy< boost::container::vector< T, Alloc > > + { + typedef boost::container::vector< T, Alloc > vector_type; + typedef typename vector_type::iterator iterator; + + void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) + { + vec.insert( itInsert, std::move( *itWhat )); + } + }; +#endif + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// boost::container::vector adapter for hash set bucket + template + class adapt< boost::container::vector, CDS_OPTIONS > + { + public: + typedef boost::container::vector container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public 
cds::container::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = true; + static bool const has_erase_with = true; + + private: + //@cond + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, CDS_OPTIONS >::type key_comparator; + + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , CDS_OPTIONS + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy +#endif + >::type copy_item; + + struct find_predicate + { + bool operator()( value_type const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( Q const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( value_type const& i1, Q const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + }; + //@endcond + + private: + //@cond + container_type m_Vector; + //@endcond + + public: + template + bool insert( const Q& val, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate() ); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { + value_type newItem( val ); + it = m_Vector.insert( it, newItem ); + cds::unref( f )( *it ); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace( Args&&... args ) + { + value_type val( std::forward(args)... 
); + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate() ); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { + it = m_Vector.emplace( it, std::move( val ) ); + return true; + } + return false; + } +# endif + + template + std::pair ensure( const Q& val, Func func ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate() ); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { + // insert new + value_type newItem( val ); + it = m_Vector.insert( it, newItem ); + cds::unref( func )( true, *it, val ); + return std::make_pair( true, true ); + } + else { + // already exists + cds::unref( func )( false, *it, val ); + return std::make_pair( true, false ); + } + } + + template + bool erase( const Q& key, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), key, find_predicate() ); + if ( it == m_Vector.end() || key_comparator()( key, *it ) != 0 ) + return false; + + // key exists + cds::unref( f )( *it ); + m_Vector.erase( it ); + return true; + } + + template + bool erase( Q const& key, Less pred, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), key, pred ); + if ( it == m_Vector.end() || pred( key, *it ) || pred( *it, key ) ) + return false; + + // key exists + cds::unref( f )( *it ); + m_Vector.erase( it ); + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate() ); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) + return false; + + // key exists + cds::unref( f )( *it, val ); + return true; + } + + template + bool find( Q& val, Less pred, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, pred ); + if ( it == m_Vector.end() || pred( val, *it ) || pred( *it, val ) ) + return false; + + // key exists + cds::unref( f )( *it, val ); + return true; + } + + /// Clears the container + void clear() + { + m_Vector.clear(); + } + + iterator begin() { return m_Vector.begin(); } + const_iterator begin() const { return m_Vector.begin(); } + iterator end() { return m_Vector.end(); } + const_iterator end() const { return m_Vector.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), *itWhat, find_predicate() ); + assert( it == m_Vector.end() || key_comparator()( *itWhat, *it ) != 0 ); + + copy_item()( m_Vector, it, itWhat ); + } + + size_t size() const + { + return m_Vector.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrusive::striped_set + + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_SET_BOOST_VECTOR_ADAPTER_H diff --git a/cds/container/striped_set/std_hash_set.h b/cds/container/striped_set/std_hash_set.h new file mode 100644 index 00000000..fcfb5e99 --- /dev/null +++ b/cds/container/striped_set/std_hash_set.h @@ -0,0 +1,13 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_SET_STD_HASH_SET_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_SET_STD_HASH_SET_ADAPTER_H + +#include +#if (CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL) && _MSC_VER < 1600 // MS VC 2008 +# include +#else +# include +#endif + +#endif // #ifndef __CDS_CONTAINER_STRIPED_SET_STD_HASH_SET_ADAPTER_H diff --git a/cds/container/striped_set/std_hash_set_std.h b/cds/container/striped_set/std_hash_set_std.h new 
file mode 100644 index 00000000..c02d6dd5 --- /dev/null +++ b/cds/container/striped_set/std_hash_set_std.h @@ -0,0 +1,178 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_SET_STD_HASH_SET_STD_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_SET_STD_HASH_SET_STD_ADAPTER_H + +#ifndef __CDS_CONTAINER_STRIPED_SET_STD_HASH_SET_ADAPTER_H +# error must be included instead of header +#endif + +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for std::unordered_set + template + struct copy_item_policy< std::unordered_set< T, Hash, Pred, Alloc > > + { + typedef std::unordered_set< T, Hash, Pred, Alloc > set_type; + typedef typename set_type::iterator iterator; + + void operator()( set_type& set, iterator itWhat ) + { + set.insert( *itWhat ); + } + }; + + template + struct swap_item_policy< std::unordered_set< T, Hash, Pred, Alloc > >: public copy_item_policy< std::unordered_set< T, Hash, Pred, Alloc > > + {}; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for std::unordered_set + template + struct move_item_policy< std::unordered_set< T, Hash, Pred, Alloc > > + { + typedef std::unordered_set< T, Hash, Pred, Alloc > set_type; + typedef typename set_type::iterator iterator; + + void operator()( set_type& set, iterator itWhat ) + { + set.insert( std::move( *itWhat ) ); + } + }; +#endif + + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + /// std::unordered_set adapter for hash set bucket + template + class adapt< std::unordered_set, CDS_OPTIONS > + { + public: + typedef std::unordered_set container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = false; + static bool const has_erase_with = false; + + private: + //@cond + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , CDS_OPTIONS + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy // not defined +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy +#endif + >::type copy_item; + //@endcond + + private: + //@cond + container_type m_Set; + //@endcond + + public: + template + bool insert( const Q& val, Func f ) + { + std::pair res = m_Set.insert( value_type(val) ); + if ( res.second ) + ::cds::unref(f)( const_cast(*res.first) ); + return res.second; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace( Args&&... args ) + { +# if CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION < 40800 || CDS_COMPILER == CDS_COMPILER_CLANG && !defined(__LIBCPP_VERSION) + // GCC < 4.8: std::set has no "emplace" member function. Emulate it + std::pair res = m_Set.insert( value_type( std::forward(args)...)); +# else + std::pair res = m_Set.emplace( std::forward(args)... 
); +# endif + return res.second; + } +# endif + + template + std::pair ensure( const Q& val, Func func ) + { + std::pair res = m_Set.insert( value_type(val) ); + cds::unref(func)( res.second, const_cast(*res.first), val ); + return std::make_pair( true, res.second ); + } + + template + bool erase( const Q& key, Func f ) + { + const_iterator it = m_Set.find( value_type(key) ); + if ( it == m_Set.end() ) + return false; + cds::unref(f)( const_cast(*it) ); + m_Set.erase( it ); + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = m_Set.find( value_type(val) ); + if ( it == m_Set.end() ) + return false; + cds::unref(f)( const_cast(*it), val ); + return true; + } + + /// Clears the container + void clear() + { + m_Set.clear(); + } + + iterator begin() { return m_Set.begin(); } + const_iterator begin() const { return m_Set.begin(); } + iterator end() { return m_Set.end(); } + const_iterator end() const { return m_Set.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + assert( m_Set.find( *itWhat ) == m_Set.end() ); + copy_item()( m_Set, itWhat ); + } + + size_t size() const + { + return m_Set.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + }; +}}} // namespace cds::intrusive::striped_set + + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_SET_STD_HASH_SET_STD_ADAPTER_H diff --git a/cds/container/striped_set/std_hash_set_vc.h b/cds/container/striped_set/std_hash_set_vc.h new file mode 100644 index 00000000..51f7d0b4 --- /dev/null +++ b/cds/container/striped_set/std_hash_set_vc.h @@ -0,0 +1,166 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_SET_STD_HASH_SET_MSVC_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_SET_STD_HASH_SET_MSVC_ADAPTER_H + +#ifndef __CDS_CONTAINER_STRIPED_SET_STD_HASH_SET_ADAPTER_H +# error must be included instead of header +#endif + +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for stdext::hash_set + template + struct copy_item_policy< stdext::hash_set< T, Traits, Alloc > > + { + typedef stdext::hash_set< T, Traits, Alloc > set_type; + typedef typename set_type::iterator iterator; + + void operator()( set_type& set, iterator itWhat ) + { + set.insert( *itWhat ); + } + }; + + template + struct swap_item_policy< stdext::hash_set< T, Traits, Alloc > >: public copy_item_policy< stdext::hash_set< T, Traits, Alloc > > + {}; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for stdext::hash_set + template + struct move_item_policy< stdext::hash_set< T, Traits, Alloc > > + { + typedef stdext::hash_set< T, Traits, Alloc > set_type; + typedef typename set_type::iterator iterator; + + void operator()( set_type& set, iterator itWhat ) + { + set.insert( std::move( *itWhat ) ); + } + }; +#endif + + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// std::unordered_set adapter for hash set bucket + template + class adapt< stdext::hash_set, CDS_OPTIONS > + { + public: + typedef stdext::hash_set container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; 
///< container const iterator + + static bool const has_find_with = false; + static bool const has_erase_with = false; + + private: + //@cond + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , CDS_OPTIONS + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy // not defined +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy +#endif + >::type copy_item; + //@endcond + + private: + //@cond + container_type m_Set; + //@endcond + + public: + + template + bool insert( const Q& val, Func f ) + { + std::pair res = m_Set.insert( value_type(val) ); + if ( res.second ) + ::cds::unref(f)( *res.first ); + return res.second; + } + + template + std::pair ensure( const Q& val, Func func ) + { + std::pair res = m_Set.insert( value_type(val) ); + ::cds::unref(func)( res.second, *res.first, val ); + return std::make_pair( true, res.second ); + } + + template + bool erase( const Q& key, Func f ) + { + iterator it = m_Set.find( value_type(key) ); + if ( it == m_Set.end() ) + return false; + ::cds::unref(f)( *it ); + m_Set.erase( it ); + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = m_Set.find( value_type(val) ); + if ( it == m_Set.end() ) + return false; + ::cds::unref(f)( *it, val ); + return true; + } + + /// Clears the container + void clear() + { + m_Set.clear(); + } + + iterator begin() { return m_Set.begin(); } + const_iterator begin() const { return m_Set.begin(); } + iterator end() { return m_Set.end(); } + const_iterator end() const { return m_Set.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + assert( m_Set.find( *itWhat ) == m_Set.end() ); + copy_item()( m_Set, itWhat ); + } + + size_t size() const + { + return m_Set.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrusive::striped_set + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_SET_STD_HASH_SET_MSVC_ADAPTER_H diff --git a/cds/container/striped_set/std_list.h b/cds/container/striped_set/std_list.h new file mode 100644 index 00000000..aa53fec0 --- /dev/null +++ b/cds/container/striped_set/std_list.h @@ -0,0 +1,302 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_SET_STD_LIST_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_SET_STD_LIST_ADAPTER_H + +#include +#include +#include +#include // std::lower_bound + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for std::list + template + struct copy_item_policy< std::list< T, Alloc > > + { + typedef std::list< T, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + itInsert = list.insert( itInsert, *itWhat ); + } + }; + + // Swap policy for std::list + template + struct swap_item_policy< std::list< T, Alloc > > + { + typedef std::list< T, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + typename list_type::value_type newVal; + itInsert = list.insert( itInsert, newVal ); + std::swap( *itWhat, *itInsert ); + } + }; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy 
for std::list + template + struct move_item_policy< std::list< T, Alloc > > + { + typedef std::list< T, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + list.insert( itInsert, std::move( *itWhat ) ); + } + }; +#endif + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// std::list adapter for hash set bucket + template + class adapt< std::list, CDS_OPTIONS > + { + public: + typedef std::list container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = true; + static bool const has_erase_with = true; + + private: + //@cond + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, CDS_OPTIONS >::type key_comparator; + + + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , CDS_OPTIONS + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy +#endif + >::type copy_item; + + struct find_predicate + { + bool operator()( value_type const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( Q const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( value_type const& i1, Q const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + }; + //@endcond + + private: + //@cond + container_type m_List; +# ifdef __GLIBCXX__ + // GCC C++ lib bug: + // In GCC (at least up to 4.7.x), the complexity of std::list::size() is O(N) + // (see http://gcc.gnu.org/bugzilla/show_bug.cgi?id=49561) + size_t m_nSize ; // list size +# endif + //@endcond + + public: + adapted_container() +# ifdef __GLIBCXX__ + : m_nSize(0) +# endif + {} + + template + bool insert( const Q& val, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate() ); + if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { + value_type newItem( val ); + it = m_List.insert( it, newItem ); + cds::unref( f )( *it ); + +# ifdef __GLIBCXX__ + ++m_nSize; +# endif + return true; + } + + // key already exists + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace( Args&&... args ) + { +#if CDS_COMPILER == CDS_COMPILER_MSVC && CDS_COMPILER_VERSION == CDS_COMPILER_MSVC12 + // MS VC++ 2013: internal compiler error + // Use assignment workaround, see http://connect.microsoft.com/VisualStudio/feedback/details/804941/visual-studio-2013-rc-c-internal-compiler-error-with-std-forward + value_type val = value_type( std::forward(args)... 
); +#else + value_type val(std::forward(args)...); +#endif + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate() ); + if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { + it = m_List.emplace( it, std::move( val ) ); +# ifdef __GLIBCXX__ + ++m_nSize; +# endif + return true; + } + return false; + } +# endif + + template + std::pair ensure( const Q& val, Func func ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate() ); + if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { + // insert new + value_type newItem( val ); + it = m_List.insert( it, newItem ); + cds::unref( func )( true, *it, val ); +# ifdef __GLIBCXX__ + ++m_nSize; +# endif + return std::make_pair( true, true ); + } + else { + // already exists + cds::unref( func )( false, *it, val ); + return std::make_pair( true, false ); + } + } + + template + bool erase( const Q& key, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate() ); + if ( it == m_List.end() || key_comparator()( key, *it ) != 0 ) + return false; + + // key exists + cds::unref( f )( *it ); + m_List.erase( it ); +# ifdef __GLIBCXX__ + --m_nSize; +# endif + + return true; + } + + template + bool erase( Q const& key, Less pred, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, pred ); + if ( it == m_List.end() || pred( key, *it ) || pred( *it, key ) ) + return false; + + // key exists + cds::unref( f )( *it ); + m_List.erase( it ); +# ifdef __GLIBCXX__ + --m_nSize; +# endif + + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate() ); + if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) + return false; + + // key exists + cds::unref( f )( *it, val ); + return true; + } + + template + bool find( Q& val, Less pred, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, pred ); + if ( it == m_List.end() || pred( val, *it ) || pred( *it, val ) ) + return false; + + // key exists + cds::unref( f )( *it, val ); + return true; + } + + /// Clears the container + void clear() + { + m_List.clear(); + } + + iterator begin() { return m_List.begin(); } + const_iterator begin() const { return m_List.begin(); } + iterator end() { return m_List.end(); } + const_iterator end() const { return m_List.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), *itWhat, find_predicate() ); + assert( it == m_List.end() || key_comparator()( *itWhat, *it ) != 0 ); + + copy_item()( m_List, it, itWhat ); +# ifdef __GLIBCXX__ + ++m_nSize; +# endif + } + + size_t size() const + { +# ifdef __GLIBCXX__ + return m_nSize; +# else + return m_List.size(); +# endif + + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} + + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_SET_STD_LIST_ADAPTER_H diff --git a/cds/container/striped_set/std_set.h b/cds/container/striped_set/std_set.h new file mode 100644 index 00000000..96be8780 --- /dev/null +++ b/cds/container/striped_set/std_set.h @@ -0,0 +1,175 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_SET_STD_SET_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_SET_STD_SET_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for std::set + template + 
struct copy_item_policy< std::set< T, Traits, Alloc > > + { + typedef std::set< T, Traits, Alloc > set_type; + typedef typename set_type::iterator iterator; + + void operator()( set_type& set, iterator itWhat ) + { + set.insert( *itWhat ); + } + }; + + template + struct swap_item_policy< std::set< T, Traits, Alloc > >: public copy_item_policy< std::set< T, Traits, Alloc > > + {}; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for std::set + template + struct move_item_policy< std::set< T, Traits, Alloc > > + { + typedef std::set< T, Traits, Alloc > set_type; + typedef typename set_type::iterator iterator; + + void operator()( set_type& set, iterator itWhat ) + { + set.insert( std::move( *itWhat ) ); + } + }; +#endif + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// std::set adapter for hash set bucket + template + class adapt< std::set, CDS_OPTIONS > + { + public: + typedef std::set container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = false; + static bool const has_erase_with = false; + + private: + //@cond + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , CDS_OPTIONS + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy +#endif + >::type copy_item; + //@endcond + + private: + //@cond + container_type m_Set; + //@endcond + + public: + + template + bool insert( const Q& val, Func f ) + { + std::pair res = m_Set.insert( value_type(val) ); + if ( res.second ) + ::cds::unref(f)( const_cast(*res.first) ); + return res.second; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace( Args&&... args ) + { +# if CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION < 40800 || CDS_COMPILER == CDS_COMPILER_CLANG && !defined(__LIBCPP_VERSION) + // GCC < 4.8: std::set has no "emplace" member function. Emulate it + std::pair res = m_Set.insert( value_type( std::forward(args)...)); +# else + std::pair res = m_Set.emplace( std::forward(args)... 
); +# endif + return res.second; + } +# endif + + template + std::pair ensure( const Q& val, Func func ) + { + std::pair res = m_Set.insert( value_type(val) ); + ::cds::unref(func)( res.second, const_cast(*res.first), val ); + return std::make_pair( true, res.second ); + } + + template + bool erase( const Q& key, Func f ) + { + iterator it = m_Set.find( value_type(key) ); + if ( it == m_Set.end() ) + return false; + ::cds::unref(f)( const_cast(*it) ); + m_Set.erase( it ); + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = m_Set.find( value_type(val) ); + if ( it == m_Set.end() ) + return false; + ::cds::unref(f)( const_cast(*it), val ); + return true; + } + + void clear() + { + m_Set.clear(); + } + + iterator begin() { return m_Set.begin(); } + const_iterator begin() const { return m_Set.begin(); } + iterator end() { return m_Set.end(); } + const_iterator end() const { return m_Set.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + assert( m_Set.find( *itWhat ) == m_Set.end() ); + copy_item()( m_Set, itWhat ); + } + + size_t size() const + { + return m_Set.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrusive::striped_set + + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_SET_STD_SET_ADAPTER_H diff --git a/cds/container/striped_set/std_vector.h b/cds/container/striped_set/std_vector.h new file mode 100644 index 00000000..37010e6f --- /dev/null +++ b/cds/container/striped_set/std_vector.h @@ -0,0 +1,263 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_STRIPED_SET_STD_VECTOR_ADAPTER_H +#define __CDS_CONTAINER_STRIPED_SET_STD_VECTOR_ADAPTER_H + +#include // lower_bound +#include +#include +#include // std::lower_bound +#include // std::pair + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for std::vector + template + struct copy_item_policy< std::vector< T, Alloc > > + { + typedef std::vector< T, Alloc > vector_type; + typedef typename vector_type::iterator iterator; + + void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) + { + vec.insert( itInsert, *itWhat ); + } + }; + + // Swap policy for std::vector + template + struct swap_item_policy< std::vector< T, Alloc > > + { + typedef std::vector< T, Alloc > vector_type; + typedef typename vector_type::iterator iterator; + + void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) + { + typename vector_type::value_type newVal; + itInsert = vec.insert( itInsert, newVal ); + std::swap( *itInsert, *itWhat ); + } + }; + +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + // Move policy for std::vector + template + struct move_item_policy< std::vector< T, Alloc > > + { + typedef std::vector< T, Alloc > vector_type; + typedef typename vector_type::iterator iterator; + + void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) + { + vec.insert( itInsert, std::move( *itWhat )); + } + }; +#endif + + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// std::vector adapter for hash set bucket + template + class adapt< std::vector, CDS_OPTIONS > + { + public: + typedef std::vector container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type 
stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = true; + static bool const has_erase_with = true; + + private: + //@cond + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, CDS_OPTIONS >::type key_comparator; + + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , CDS_OPTIONS + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy +#ifdef CDS_MOVE_SEMANTICS_SUPPORT + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy +#endif + >::type copy_item; + + struct find_predicate + { + bool operator()( value_type const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( Q const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( value_type const& i1, Q const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + }; + //@endcond + + private: + //@cond + container_type m_Vector; + //@endcond + + public: + + template + bool insert( const Q& val, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate() ); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { + value_type newItem( val ); + it = m_Vector.insert( it, newItem ); + cds::unref( f )( *it ); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + template + bool emplace( Args&&... args ) + { +#if CDS_COMPILER == CDS_COMPILER_MSVC && CDS_COMPILER_VERSION == CDS_COMPILER_MSVC12 + // MS VC++ 2013 internal compiler error + // Use assignment workaround, see http://connect.microsoft.com/VisualStudio/feedback/details/804941/visual-studio-2013-rc-c-internal-compiler-error-with-std-forward + value_type val = value_type(std::forward(args)...); +#else + value_type val( std::forward(args)... 
); +#endif + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate() ); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { + it = m_Vector.emplace( it, std::move( val ) ); + return true; + } + return false; + } +# endif + + template + std::pair ensure( const Q& val, Func func ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate() ); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { + // insert new + value_type newItem( val ); + it = m_Vector.insert( it, newItem ); + cds::unref( func )( true, *it, val ); + return std::make_pair( true, true ); + } + else { + // already exists + cds::unref( func )( false, *it, val ); + return std::make_pair( true, false ); + } + } + + template + bool erase( const Q& key, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), key, find_predicate() ); + if ( it == m_Vector.end() || key_comparator()( key, *it ) != 0 ) + return false; + + // key exists + cds::unref( f )( *it ); + m_Vector.erase( it ); + return true; + } + + template + bool erase( const Q& key, Less pred, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), key, pred ); + if ( it == m_Vector.end() || pred( key, *it ) || pred( *it, key ) ) + return false; + + // key exists + cds::unref( f )( *it ); + m_Vector.erase( it ); + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate() ); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) + return false; + + // key exists + cds::unref( f )( *it, val ); + return true; + } + + template + bool find( Q& val, Less pred, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, pred ); + if ( it == m_Vector.end() || pred( val, *it ) || pred( *it, val ) ) + return false; + + // key exists + cds::unref( f )( *it, val ); + return true; + } + + + void clear() + { + m_Vector.clear(); + } + + iterator begin() { return m_Vector.begin(); } + const_iterator begin() const { return m_Vector.begin(); } + iterator end() { return m_Vector.end(); } + const_iterator end() const { return m_Vector.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), *itWhat, find_predicate() ); + assert( it == m_Vector.end() || key_comparator()( *itWhat, *it ) != 0 ); + + copy_item()( m_Vector, it, itWhat ); + } + + size_t size() const + { + return m_Vector.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrusive::striped_set + +//@endcond + +#endif // #ifndef __CDS_CONTAINER_STRIPED_SET_STD_VECTOR_ADAPTER_H diff --git a/cds/container/treiber_stack.h b/cds/container/treiber_stack.h new file mode 100644 index 00000000..63d0e860 --- /dev/null +++ b/cds/container/treiber_stack.h @@ -0,0 +1,297 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_TREIBER_STACK_H +#define __CDS_CONTAINER_TREIBER_STACK_H + +#include +#include +#include + +namespace cds { namespace container { + + //@cond + namespace treiber_stack { + using cds::intrusive::treiber_stack::stat; + using cds::intrusive::treiber_stack::empty_stat; + + template + struct make_treiber_stack + { + typedef T value_type; + + struct default_options { + typedef cds::backoff::Default back_off; + typedef CDS_DEFAULT_ALLOCATOR allocator; + typedef cds::opt::v::relaxed_ordering 
memory_model; + typedef cds::atomicity::empty_item_counter item_counter; + typedef empty_stat stat; + + // Elimination back-off options + static CDS_CONSTEXPR_CONST bool enable_elimination = false; + typedef cds::backoff::delay<> elimination_backoff; + typedef opt::v::static_buffer< int, 4 > buffer; + typedef opt::v::c_rand random_engine; + typedef cds::lock::Spin lock_type; + }; + + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< default_options, CDS_OPTIONS11 >::type + ,CDS_OPTIONS11 + >::type options; + + typedef GC gc; + typedef typename options::memory_model memory_model; + + struct node_type: public cds::intrusive::single_link::node< gc > + { + value_type m_value; + + node_type( const value_type& val ) + : m_value( val ) + {} +# ifdef CDS_EMPLACE_SUPPORT + template + node_type( Args&&... args ) + : m_value( std::forward(args)...) + {} +# else + node_type() + {} +# endif + }; + + typedef typename options::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + + typedef intrusive::TreiberStack< + gc + ,node_type + ,intrusive::opt::hook< + intrusive::single_link::base_hook< cds::opt::gc > + > + ,cds::opt::back_off< typename options::back_off > + ,cds::intrusive::opt::disposer< node_deallocator > + ,cds::opt::memory_model< memory_model > + ,cds::opt::item_counter< typename options::item_counter > + ,cds::opt::stat< typename options::stat > + ,cds::opt::enable_elimination< options::enable_elimination > + ,cds::opt::buffer< typename options::buffer > + ,cds::opt::random_engine< typename options::random_engine > + ,cds::opt::elimination_backoff< typename options::elimination_backoff > + ,cds::opt::lock_type< typename options::lock_type > + > type; + }; + } // namespace treiber_stack + //@endcond + + /// Treiber's stack algorithm + /** @ingroup cds_nonintrusive_stack + It is non-intrusive version of Treiber's stack algorithm based on intrusive implementation + intrusive::TreiberStack. + + Template arguments: + - \p GC - garbage collector type: gc::HP, gc::HRC, gc::PTB + - \p T - type stored in the stack. It should be default-constructible, copy-constructible, assignable type. + - \p Options - options + + Available \p Options: + - opt::allocator - allocator (like \p std::allocator). Default is \ref CDS_DEFAULT_ALLOCATOR + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::Default is used + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter + - opt::stat - the type to gather internal statistics. + Possible option value are: \ref cds::intrusive::treiber_stack::stat "treiber_stack::stat", + \ref cds::intrusive::treiber_stack::empty_stat "treiber_stack::empty_stat" (the default), + user-provided class that supports treiber_stack::stat interface. + - opt::enable_elimination - enable elimination back-off for the stack. Default value is \p valse. + + If elimination back-off is enabled (\p %cds::opt::enable_elimination< true >) additional options can be specified: + - opt::buffer - a buffer type for elimination array, see \p opt::v::static_buffer, \p opt::v::dynamic_buffer. 
+ The buffer can be any size: \p Exp2 template parameter of those classes can be \p false. + The size should be selected empirically for your application and hardware, there are no common rules for that. + Default is %opt::v::static_buffer< any_type, 4 > . + - opt::random_engine - a random engine to generate a random position in elimination array. + Default is opt::v::c_rand. + - opt::elimination_backoff - back-off strategy to wait for elimination, default is cds::backoff::delay<> + - opt::lock_type - a lock type used in elimination back-off, default is cds::lock::Spin. + */ + template < typename GC, typename T, CDS_DECL_OPTIONS11 > + class TreiberStack + : public +#ifdef CDS_DOXYGEN_INVOKED + intrusive::TreiberStack< GC, cds::intrusive::single_link::node< T >, Options... > +#else + treiber_stack::make_treiber_stack< GC, T, CDS_OPTIONS11 >::type +#endif + { + //@cond + typedef treiber_stack::make_treiber_stack< GC, T, CDS_OPTIONS11 > options; + typedef typename options::type base_class; + //@endcond + + public: + /// Rebind template arguments + template + struct rebind { + typedef TreiberStack< GC2, T2, CDS_OTHER_OPTIONS11> other ; ///< Rebinding result + }; + + public: + typedef T value_type ; ///< Value type stored in the stack + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename options::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_order option + typedef typename base_class::stat stat ; ///< Internal statistics policy used + + protected: + typedef typename options::node_type node_type ; ///< stack node type (derived from intrusive::single_link::node) + + //@cond + typedef typename options::cxx_allocator cxx_allocator; + typedef typename options::node_deallocator node_deallocator; // deallocate node + //@endcond + + protected: + ///@cond + static node_type * alloc_node( const value_type& val ) + { + return cxx_allocator().New( val ); + } +# ifdef CDS_EMPLACE_SUPPORT + template + static node_type * alloc_node_move( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward( args )... ); + } +# endif + + static void free_node( node_type * p ) + { + node_deallocator()( p ); + } + static void retire_node( node_type * p ) + { + gc::template retire( p ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + public: + /// Constructs empty stack + TreiberStack() + {} + + /// Constructs empty stack and initializes elimination back-off data + /** + This form should be used if you use elimination back-off with dynamically allocated collision array, i.e + \p Options... contains cds::opt::buffer< cds::opt::v::dynamic_buffer >. + \p nCollisionCapacity parameter specifies the capacity of collision array. + */ + TreiberStack( size_t nCollisionCapacity ) + : base_class( nCollisionCapacity ) + {} + + /// Clears the stack on destruction + ~TreiberStack() + {} + + /// Push the item \p val on the stack + bool push( const value_type& val ) + { + scoped_node_ptr p( alloc_node(val)); + if ( base_class::push( *p )) { + p.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Pushes data of type \ref value_type constructed with std::forward(args)... 
+ /** + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr p( alloc_node_move( std::forward(args)...)); + if ( base_class::push( *p )) { + p.release(); + return true; + } + return false; + } +# endif + + /// Pop an item from the stack + /** + The value of popped item is stored in \p val. + On success functions returns \p true, \p val contains value popped from the stack. + If stack is empty the function returns \p false, \p val is unchanged. + */ + bool pop( value_type& val ) + { + node_type * p = base_class::pop(); + if ( !p ) + return false; + + val = p->m_value; + retire_node( p ); + + return true; + } + + /// Check if stack is empty + bool empty() const + { + return base_class::empty(); + } + + /// Clear the stack + void clear() + { + base_class::clear(); + } + + /// Returns stack's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the stack + is empty. To check emptyness use \ref empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + }; + +}} // namespace cds::container + + +#endif // #ifndef __CDS_CONTAINER_TREIBER_STACK_H diff --git a/cds/container/tsigas_cycle_queue.h b/cds/container/tsigas_cycle_queue.h new file mode 100644 index 00000000..7ea351a7 --- /dev/null +++ b/cds/container/tsigas_cycle_queue.h @@ -0,0 +1,341 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_TSIGAS_CYCLE_QUEUE_H +#define __CDS_CONTAINER_TSIGAS_CYCLE_QUEUE_H + +#include +#include +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + template + struct make_tsigas_cycle_queue + { + typedef T value_type; + + struct default_options { + typedef cds::backoff::empty back_off; + typedef CDS_DEFAULT_ALLOCATOR allocator; + typedef atomicity::empty_item_counter item_counter; + typedef opt::v::relaxed_ordering memory_model; + enum { alignment = opt::cache_line_alignment }; + }; + + typedef typename opt::make_options< + typename cds::opt::find_type_traits< default_options, CDS_OPTIONS7 >::type + ,CDS_OPTIONS7 + >::type options; + + typedef typename options::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( value_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + typedef node_deallocator node_disposer; + + typedef intrusive::TsigasCycleQueue< + value_type + ,opt::buffer< typename options::buffer > + ,opt::back_off< typename options::back_off > + ,intrusive::opt::disposer< node_disposer > + ,opt::item_counter< typename options::item_counter > + ,opt::alignment< options::alignment > + ,opt::memory_model< typename options::memory_model > + > type; + }; + + } + //@endcond + + /// Non-blocking cyclic queue discovered by Philippas Tsigas and Yi Zhang + /** @ingroup cds_nonintrusive_queue + It is non-intrusive implementation of Tsigas & Zhang cyclic queue based on intrusive::TsigasCycleQueue. + + Source: + \li [2000] Philippas Tsigas, Yi Zhang "A Simple, Fast and Scalable Non-Blocking Concurrent FIFO Queue + for Shared Memory Multiprocessor Systems" + + \p T is a type stored in the queue. 
It should be default-constructible, copy-constructible, assignable type. + + Available \p Options: + - opt::buffer - buffer to store items. Mandatory option, see option description for full list of possible types. + - opt::allocator - allocator (like \p std::allocator). Default is \ref CDS_DEFAULT_ALLOCATOR + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. + - opt::alignment - the alignment for internal queue data. Default is opt::cache_line_alignment + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + + \par Examples: + \code + #include + + struct Foo { + ... + }; + + // Queue of Foo, capacity is 1024, statically allocated buffer: + typedef cds::intrusive::TsigasCycleQueue< + Foo + ,cds::opt::buffer< cds::opt::v::static_buffer< Foo, 1024 > > + > static_queue; + static_queue stQueue; + + // Queue of Foo, capacity is 1024, dynamically allocated buffer: + typedef cds::intrusive::TsigasCycleQueue< + Foo + ,cds::opt::buffer< cds::opt::v::dynamic_buffer< Foo > > + > dynamic_queue; + dynamic_queue dynQueue( 1024 ); + \endcode + */ + template + class TsigasCycleQueue: +#ifdef CDS_DOXYGEN_INVOKED + intrusive::TsigasCycleQueue< T, Options... > +#else + details::make_tsigas_cycle_queue< T, CDS_OPTIONS7 >::type +#endif + { + //@cond + typedef details::make_tsigas_cycle_queue< T, CDS_OPTIONS7 > options; + typedef typename options::type base_class; + //@endcond + public: + typedef T value_type ; ///< Value type stored in the stack + + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator_type allocator_type ; ///< Allocator type used for allocate/deallocate the nodes + typedef typename options::options::item_counter item_counter ; ///< Item counting policy used + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + + /// Rebind template arguments + template + struct rebind { + typedef TsigasCycleQueue< T2, CDS_OTHER_OPTIONS7> other ; ///< Rebinding result + }; + + protected: + //@cond + typedef typename options::cxx_allocator cxx_allocator; + typedef typename options::node_deallocator node_deallocator; // deallocate node + typedef typename options::node_disposer node_disposer; + //@endcond + + protected: + ///@cond + static value_type * alloc_node() + { + return cxx_allocator().New(); + } + static value_type * alloc_node( const value_type& val ) + { + return cxx_allocator().New( val ); + } +# ifdef CDS_EMPLACE_SUPPORT + template + static value_type * alloc_node_move( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward( args )... ); + } +# endif + static void free_node( value_type * p ) + { + node_deallocator()( p ); + } + + struct node_disposer2 { + void operator()( value_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< value_type, node_disposer2 > scoped_node_ptr; + //@endcond + + public: + /// Initialize empty queue of capacity \p nCapacity + /** + For cds::opt::v::static_buffer the \p nCapacity parameter is ignored. + + Note that the real capacity of queue is \p nCapacity - 2. 
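+
+            For illustration, a minimal sketch of both buffer variants (the option
+            spelling follows the class-level example above):
+            \code
+            // dynamically allocated buffer: capacity is passed to the constructor
+            typedef cds::container::TsigasCycleQueue<
+                int
+                ,cds::opt::buffer< cds::opt::v::dynamic_buffer< int > >
+            > dyn_queue;
+            dyn_queue q( 64 );      // the queue stores at most 62 items
+
+            // statically allocated buffer: the constructor argument is ignored
+            typedef cds::container::TsigasCycleQueue<
+                int
+                ,cds::opt::buffer< cds::opt::v::static_buffer< int, 64 > >
+            > static_queue;
+            static_queue sq;
+            \endcode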
+ */ + TsigasCycleQueue( size_t nCapacity = 0 ) + : base_class( nCapacity ) + {} + + /// Returns queue's item count (see \ref intrusive::TsigasCycleQueue::size for explanation) + size_t size() const + { + return base_class::size(); + } + + /// Returns capacity of cyclic buffer + /** + Warning: real capacity of queue is two less than returned value of this function. + */ + size_t capacity() const + { + return base_class::capacity(); + } + + /// Enqueues \p val value into the queue. + /** + The function makes queue node in dynamic memory calling copy constructor for \p val + and then it calls intrusive::TsigasCycleQueue::enqueue. + Returns \p true if success, \p false otherwise. + */ + bool enqueue( value_type const& val ) + { + scoped_node_ptr p( alloc_node(val)); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Enqueues \p data to queue using copy functor + /** + \p Func is a functor called to copy value \p data of type \p Type + which may be differ from type \p T stored in the queue. + The functor's interface is: + \code + struct myFunctor { + void operator()(T& dest, SOURCE const& data) + { + // // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + + Requirements The functor \p Func should not throw any exception. + */ + template + bool enqueue( const Type& data, Func f ) + { + scoped_node_ptr p( alloc_node()); + unref(f)( *p, data ); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Enqueues data of type \ref value_type constructed with std::forward(args)... + /** + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr p ( alloc_node_move( std::forward(args)...)); + if ( base_class::enqueue( *p)) { + p.release(); + return true; + } + return false; + } +# endif + + /// Dequeues a value using copy functor + /** + \p Func is a functor called to copy dequeued value to \p dest of type \p Type + which may be differ from type \p T stored in the queue. + The functor's interface is: + \code + struct myFunctor { + void operator()(Type& dest, T const& data) + { + // // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + + Requirements The functor \p Func should not throw any exception. + */ + template + bool dequeue( Type& dest, Func f ) + { + value_type * p = base_class::dequeue(); + if ( p ) { + unref(f)( dest, *p ); + node_disposer()( p ); + return true; + } + return false; + } + + /// Dequeues a value from the queue + /** + If queue is not empty, the function returns \p true, \p dest contains copy of + dequeued value. The assignment operator for type \ref value_type is invoked. + If queue is empty, the function returns \p false, \p dest is unchanged. 
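+
+            A typical drain loop (sketch; \p myQueue is an instance of this class storing \p int):
+            \code
+            int n;
+            while ( myQueue.dequeue( n )) {
+                // process n
+            }
+            \endcode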
+ */ + bool dequeue( value_type& dest ) + { + typedef cds::details::trivial_assign functor; + return dequeue( dest, functor() ); + } + + /// Synonym for \ref enqueue function + bool push( const value_type& val ) + { + return enqueue( val ); + } + + /// Synonym for template version of \ref enqueue function + template + bool push( const Type& data, Func f ) + { + return enqueue( data, f ); + } + + /// Synonym for \ref dequeue function + bool pop( value_type& dest ) + { + return dequeue( dest ); + } + + /// Synonym for template version of \ref dequeue function + template + bool pop( Type& dest, Func f ) + { + return dequeue( dest, f ); + } + + /// Checks if the queue is empty + bool empty() const + { + return base_class::empty(); + } + + /// Clear the queue + /** + The function repeatedly calls \ref dequeue until it returns NULL. + */ + void clear() + { + base_class::clear(); + } + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_CONTAINER_TSIGAS_CYCLE_QUEUE_H diff --git a/cds/container/vyukov_mpmc_cycle_queue.h b/cds/container/vyukov_mpmc_cycle_queue.h new file mode 100644 index 00000000..0b871d13 --- /dev/null +++ b/cds/container/vyukov_mpmc_cycle_queue.h @@ -0,0 +1,392 @@ +//$$CDS-header$$ + +#ifndef __CDS_CONTAINER_VYUKOV_MPMC_CYCLE_QUEUE_H +#define __CDS_CONTAINER_VYUKOV_MPMC_CYCLE_QUEUE_H + +#include +#include +#include +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Vyukov's MPMC bounded queue + /** @ingroup cds_nonintrusive_queue + This algorithm is developed by Dmitry Vyukov (see http://www.1024cores.net) + It's multi-producer multi-consumer (MPMC), array-based, fails on overflow, does not require GC, w/o priorities, causal FIFO, + blocking producers and consumers queue. The algorithm is pretty simple and fast. It's not lock-free in the official meaning, + just implemented by means of atomic RMW operations w/o mutexes. + + The cost of enqueue/dequeue is 1 CAS per operation. + No dynamic memory allocation/management during operation. Producers and consumers are separated from each other (as in the two-lock queue), + i.e. do not touch the same data while queue is not empty. + + \par Source: + http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue + + \par Template parameters + \li \p T - type stored in queue. + \li \p Options - queue's options + + Options \p Options are: + - opt::buffer - buffer to store items. Mandatory option, see option description for full list of possible types. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter + - opt::value_cleaner - a functor to clean item dequeued. Default value is \ref opt::v::destruct_cleaner + that calls the destructor of type \p T. + After an item is dequeued, \p value_cleaner cleans the cell that the item has been occupied. If \p T + is a complex type, \p value_cleaner may be the useful feature. + - opt::alignment - the alignment for internal queue data. Default is opt::cache_line_alignment + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). 
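+
+        For example, a bounded queue of strings with a dynamically allocated buffer
+        and real item counting could be declared as follows (illustrative sketch):
+        \code
+        typedef cds::container::VyukovMPMCCycleQueue<
+            std::string
+            ,cds::opt::buffer< cds::opt::v::dynamic_buffer< std::string > >
+            ,cds::opt::item_counter< cds::atomicity::item_counter >
+        > string_queue;
+        string_queue q( 4096 );     // capacity must be a power of 2
+        \endcode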
+ + \par License + Simplified BSD license by Dmitry Vyukov (http://www.1024cores.net/site/1024cores/home/code-license) + + \par Example + \code + #include + + // // Queue with 1024 item static buffer + cds::container::vyukov_mpmc_bounded< + int + ,cds::opt::buffer< cds::opt::v::static_buffer > + > myQueue; + \endcode + */ + template + class VyukovMPMCCycleQueue + : public cds::bounded_container + { + protected: + //@cond + struct default_options + { + typedef cds::opt::v::destruct_cleaner value_cleaner; + typedef atomicity::empty_item_counter item_counter; + typedef opt::v::empty_disposer disposer ; // for intrusive version only + typedef opt::v::relaxed_ordering memory_model; + enum { alignment = opt::cache_line_alignment }; + }; + //@endcond + + public: + //@cond + typedef typename opt::make_options< + typename cds::opt::find_type_traits< default_options, CDS_OPTIONS6 >::type + ,CDS_OPTIONS6 + >::type options; + //@endcond + + protected: + //@cond + typedef typename options::value_cleaner value_cleaner; + //@endcond + + public: + typedef T value_type ; ///< @anchor cds_container_VyukovMPMCCycleQueue_value_type type of value stored in the queue + typedef typename options::item_counter item_counter ; ///< Item counter type + typedef typename options::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + + /// Rebind template arguments + template + struct rebind { + typedef VyukovMPMCCycleQueue< T2, CDS_OTHER_OPTIONS6> other ; ///< Rebinding result + }; + + protected: + //@cond + typedef CDS_ATOMIC::atomic sequence_type; + struct cell_type + { + sequence_type sequence; + value_type data; + + cell_type() + {} + }; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct copy_construct { + void operator()( value_type& dest, value_type const& src ) + { + new ( &dest ) value_type( src ); + } + }; +# endif + + typedef cds::details::trivial_assign< value_type, value_type > copy_assign; + + typedef typename options::buffer::template rebind::other buffer; + typedef typename opt::details::alignment_setter< sequence_type, options::alignment >::type aligned_sequence_type; + typedef typename opt::details::alignment_setter< buffer, options::alignment >::type aligned_buffer; + //@endcond + + protected: + //@cond + aligned_buffer m_buffer; + size_t const m_nBufferMask; + aligned_sequence_type m_posEnqueue; + aligned_sequence_type m_posDequeue; + item_counter m_ItemCounter; + //@endcond + + public: + /// Constructs the queue of capacity \p nCapacity + /** + For cds::opt::v::static_buffer the \p nCapacity parameter is ignored. + */ + VyukovMPMCCycleQueue( + size_t nCapacity = 0 + ) + : m_buffer( nCapacity ) + , m_nBufferMask( m_buffer.capacity() - 1 ) + { + nCapacity = m_buffer.capacity(); + + // Buffer capacity must be power of 2 + assert( nCapacity >= 2 && (nCapacity & (nCapacity - 1)) == 0 ); + + for (size_t i = 0; i != nCapacity; i += 1) + m_buffer[i].sequence.store(i, memory_model::memory_order_relaxed); + + m_posEnqueue.store(0, memory_model::memory_order_relaxed); + m_posDequeue.store(0, memory_model::memory_order_relaxed); + } + + ~VyukovMPMCCycleQueue() + { + clear(); + } + + /// Enqueues \p data to queue using copy functor + /** @anchor cds_container_VyukovMPMCCycleQueue_enqueue_func + \p Func is a functor called to copy value \p data of type \p Source + which may be differ from type \p T stored in the queue. 
+ The functor's interface is: + \code + struct myFunctor { + void operator()(T& dest, Source const& data) + { + // // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p f by reference. + + Requirements The functor \p Func should not throw any exception. + */ + template + bool enqueue(Source const& data, Func func) + { + cell_type* cell; + size_t pos = m_posEnqueue.load(memory_model::memory_order_relaxed); + + for (;;) + { + cell = &m_buffer[pos & m_nBufferMask]; + size_t seq = cell->sequence.load(memory_model::memory_order_acquire); + + intptr_t dif = (intptr_t)seq - (intptr_t)pos; + + if (dif == 0) + { + if ( m_posEnqueue.compare_exchange_weak(pos, pos + 1, memory_model::memory_order_relaxed) ) + break; + } + else if (dif < 0) + return false; + else + pos = m_posEnqueue.load(memory_model::memory_order_relaxed); + } + + unref(func)( cell->data, data ); + + cell->sequence.store(pos + 1, memory_model::memory_order_release); + ++m_ItemCounter; + + return true; + } + + /// @anchor cds_container_VyukovMPMCCycleQueue_enqueue Enqueues \p data to queue + bool enqueue(value_type const& data ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return enqueue( data, [](value_type& dest, value_type const& src){ new ( &dest ) value_type( src ); }); +# else + return enqueue( data, copy_construct() ); +# endif + } + +# ifdef CDS_EMPLACE_SUPPORT + /// Enqueues data of type \ref cds_container_VyukovMPMCCycleQueue_value_type "value_type" constructed with std::forward(args)... + /** + This function is available only for compiler that supports + variadic template and move semantics + */ + template + bool emplace( Args&&... args ) + { + cell_type* cell; + size_t pos = m_posEnqueue.load(memory_model::memory_order_relaxed); + + for (;;) + { + cell = &m_buffer[pos & m_nBufferMask]; + size_t seq = cell->sequence.load(memory_model::memory_order_acquire); + + intptr_t dif = (intptr_t)seq - (intptr_t)pos; + + if (dif == 0) + { + if ( m_posEnqueue.compare_exchange_weak(pos, pos + 1, memory_model::memory_order_relaxed) ) + break; + } + else if (dif < 0) + return false; + else + pos = m_posEnqueue.load(memory_model::memory_order_relaxed); + } + + new ( &cell->data ) value_type( std::forward(args)... ); + + cell->sequence.store(pos + 1, memory_model::memory_order_release); + ++m_ItemCounter; + + return true; + + } +# endif + + /// Dequeues an item from queue + /** @anchor cds_container_VyukovMPMCCycleQueue_dequeue_func + \p Func is a functor called to copy dequeued value of type \p T to \p dest of type \p Dest. + The functor's interface is: + \code + struct myFunctor { + void operator()(Dest& dest, T const& data) + { + // // Code to copy \p data to \p dest + dest = data; + } + }; + \endcode + You may use \p boost:ref construction to pass functor \p func by reference. + + Requirements The functor \p Func should not throw any exception. 
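+
+            Usage sketch (\p length_extractor is a hypothetical functor that copies only
+            the length of a stored \p std::string into a \p size_t destination):
+            \code
+            struct length_extractor {
+                void operator()( size_t& dest, std::string const& s ) { dest = s.size(); }
+            };
+            size_t nLen;
+            q.dequeue( nLen, length_extractor() );  // q stores std::string items
+            \endcode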
+ */ + template + bool dequeue( Dest& data, Func func ) + { + cell_type * cell; + size_t pos = m_posDequeue.load(memory_model::memory_order_relaxed); + + for (;;) + { + cell = &m_buffer[pos & m_nBufferMask]; + size_t seq = cell->sequence.load(memory_model::memory_order_acquire); + intptr_t dif = (intptr_t)seq - (intptr_t)(pos + 1); + + if (dif == 0) { + if ( m_posDequeue.compare_exchange_weak(pos, pos + 1, memory_model::memory_order_relaxed)) + break; + } + else if (dif < 0) + return false; + else + pos = m_posDequeue.load(memory_model::memory_order_relaxed); + } + + unref(func)( data, cell->data ); + value_cleaner()( cell->data ); + --m_ItemCounter; + cell->sequence.store( pos + m_nBufferMask + 1, memory_model::memory_order_release ); + + return true; + } + + /// Dequeues an item from queue to \p data + /** @anchor cds_container_VyukovMPMCCycleQueue_dequeue + If queue is empty, returns \p false, \p data is unchanged. + */ + bool dequeue(value_type & data ) + { + return dequeue( data, copy_assign() ); + } + + /// Synonym of \ref cds_container_VyukovMPMCCycleQueue_enqueue "enqueue" + bool push(value_type const& data) + { + return enqueue(data); + } + + /// Synonym for template version of \ref cds_container_VyukovMPMCCycleQueue_enqueue_func "enqueue" function + template + bool push( const Source& data, Func f ) + { + return enqueue( data, f ); + } + + /// Synonym of \ref cds_container_VyukovMPMCCycleQueue_dequeue "dequeue" + bool pop(value_type& data) + { + return dequeue(data); + } + + /// Synonym for template version of \ref cds_container_VyukovMPMCCycleQueue_dequeue_func "dequeue" function + template + bool pop( Type& dest, Func f ) + { + return dequeue( dest, f ); + } + + /// Checks if the queue is empty + bool empty() const + { + const cell_type * cell; + size_t pos = m_posDequeue.load(memory_model::memory_order_relaxed); + + for (;;) + { + cell = &m_buffer[pos & m_nBufferMask]; + size_t seq = cell->sequence.load(memory_model::memory_order_acquire); + intptr_t dif = (intptr_t)seq - (intptr_t)(pos + 1); + + if (dif == 0) + return false; + else if (dif < 0) + return true; + else + pos = m_posDequeue.load(memory_model::memory_order_relaxed); + } + } + + /// Clears the queue + void clear() + { + value_type v; + while ( pop(v) ); + } + + /// Returns queue's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns capacity of cyclic buffer + size_t capacity() const + { + return m_buffer.capacity(); + } + }; +}} // namespace cds::container + +#endif // #ifndef __CDS_CONTAINER_VYUKOV_MPMC_CYCLE_QUEUE_H diff --git a/cds/cxx11_atomic.h b/cds/cxx11_atomic.h new file mode 100644 index 00000000..d61a49a6 --- /dev/null +++ b/cds/cxx11_atomic.h @@ -0,0 +1,378 @@ +//$$CDS-header$$ + +#ifndef __CDS_CXX11_ATOMIC_H +#define __CDS_CXX11_ATOMIC_H + +#include + +namespace cds { + +/// C++11 Atomic library support +/** @ingroup cds_cxx11_stdlib_wrapper + libcds has an implementation of C++11 atomic library (header ) + specified in N3242, p.29. + + This implementation has full support + - atomic class and its specializations for integral types and pointers + - atomic_flag class + - free atomic_xxx functions + + Exclusions: the following features specified in C++11 standard are not implemented: + - Atomic emulation. 
The library implements only genuine atomic operations for supported processors + - Static initialization macros (like \p ATOMIC_FLAG_INIT and others) + - \p atomic_init functions + + Internal atomic implementation is used when the standard library provided by compiler + has no C++11 \ header or it is not standard compliant, + or when \p CDS_USE_LIBCDS_ATOMIC preprocessor macro is explicitly defined in compiler command line. + The library defines \p CDS_ATOMIC macro that specifies atomic library namespace: + - \p std for compiler-provided \ library + - \p boost if you use boost.atomic library (see note below) + - \p cds::cxx11_atomic if internal \p libcds atomic implementation used + + The library has internal atomic implementation for the following processor architectures: + - Intel and AMD x86 (32bit) and amd64 (64bit) + - Intel Itanium IA64 (64bit) + - UltraSparc (64bit) + + Using \p CDS_ATOMIC macro you may call \ library functions and classes, + for example: + \code + CDS_ATOMIC::atomic atomInt; + CDS_ATOMIC::atomic_store_explicit( &atomInt, 0, CDS_ATOMIC::memory_order_release ); + \endcode + + \par Microsoft Visual C++ + + MS Visual C++ has native \ header beginning from Visual C++ 2012. + However, MSVC++ 2012 has a quite inefficient implementation on atomic load/store + based on \p compare_exchange, so \p libcds does not use MSVC++ 2012 atomics. + The \p libcds library defines \p CDS_ATOMIC as + - \p cds::cxx11_atomic (internal implementation) for MS VC++ 2008, 2010, and 2012 + - \p std for MS VC++ 2013 and above. + + \par GCC + + For GCC compiler the macro \p CDS_ATOMIC is defined as: + - \p cds::cxx11_atomic by default + - \p std if the compiler version is 4.6 and \p CDS_CXX11_ATOMIC_GCC is defined (see below) + - \p std for GCC 4.7 and above + + GCC team implements full support for C++11 memory model in version 4.7 + (see http://gcc.gnu.org/wiki/Atomic/GCCMM). + \p libcds uses its own implementation of C++11 \ library located in + file for GCC version up to 4.6. This implementation almost conforms to C++11 standard draft + N3242 (see exclusions above) + that is closest to final version. + However, GCC 4.6 has the implementation of \ header in its libstdc++ + that is built on __sync_xxx (or __atomic_xxx) built-in functions. You can use libcds with GCC 4.6 + \ specifying \p CDS_CXX11_ATOMIC_GCC macro in g++ command line: + \code g++ -DCDS_CXX11_ATOMIC_GCC ... \endcode + GCC 4.6 atomic implementation does not support atomic for any type \p T. The linker + generates "undefined symbol" error for atomic if \p T is not an integral type or a pointer. It is + not essential for intrusive and non-intrusive containers represented in \p libcds. + However, cds::memory::michael memory allocator cannot be linked with GCC 4.6 \ header. + This error has been fixed in GCC 4.7. + + \par Clang + + The macro \p CDS_ATOMIC is defined as \p cds::cxx11_atomic. + \p libcds does not yet use native clang atomics. + + \par boost::atomic + + Beginning from version 1.54, boost library contains an implementation of atomic + sufficient for \p libcds. + You can compile \p libcds and your projects with boost.atomic specifying \p -DCDS_USE_BOOST_ATOMIC + in compiler's command line. 
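+    For example:
+    \code g++ -DCDS_USE_BOOST_ATOMIC ... \endcode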
+*/ +namespace cxx11_atomics { +}} // namespace cds::cxx11_atomics + +//@cond +#if defined(CDS_USE_BOOST_ATOMIC) +# include +# if BOOST_VERSION >= 105400 +# include +# define CDS_ATOMIC boost +# define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace boost { +# define CDS_CXX11_ATOMIC_END_NAMESPACE } +# else +# error "Boost version 1.54 or above is needed for boost.atomic" +# endif +#elif CDS_CXX11_ATOMIC_SUPPORT == 1 && !defined(CDS_USE_LIBCDS_ATOMIC) + // Compiler supports C++11 atomic (conditionally defined in cds/details/defs.h) +# include +# include +# define CDS_ATOMIC std +# define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace std { +# define CDS_CXX11_ATOMIC_END_NAMESPACE } +# include +#else +# include +# define CDS_ATOMIC cds::cxx11_atomics +# define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace cds { namespace cxx11_atomics { +# define CDS_CXX11_ATOMIC_END_NAMESPACE }} +#endif +//@endcond + +namespace cds { + + /// Atomic primitives + /** + This namespace contains useful primitives derived from std::atomic. + */ + namespace atomicity { + + /// Atomic event counter. + /** + This class is based on std::atomic_size_t. + It uses relaxed memory ordering \p memory_order_relaxed and may be used as a statistic counter. + */ + class event_counter + { + //@cond + CDS_ATOMIC::atomic_size_t m_counter; + //@endcond + + public: + typedef size_t value_type ; ///< Type of counter + + public: + // Initializes event counter with zero + event_counter() CDS_NOEXCEPT + : m_counter(size_t(0)) + {} + + /// Assign operator + /** + Returns \p n. + */ + value_type operator =( + value_type n //< new value of the counter + ) CDS_NOEXCEPT + { + m_counter.exchange( n, CDS_ATOMIC::memory_order_relaxed ); + return n; + } + + /// Addition + /** + Returns new value of the atomic counter. + */ + size_t operator +=( + size_t n ///< addendum + ) CDS_NOEXCEPT + { + return m_counter.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ) + n; + } + + /// Substraction + /** + Returns new value of the atomic counter. + */ + size_t operator -=( + size_t n ///< subtrahend + ) CDS_NOEXCEPT + { + return m_counter.fetch_sub( n, CDS_ATOMIC::memory_order_relaxed ) - n; + } + + /// Get current value of the counter + operator size_t () const CDS_NOEXCEPT + { + return m_counter.load( CDS_ATOMIC::memory_order_relaxed ); + } + + /// Preincrement + size_t operator ++() CDS_NOEXCEPT + { + return m_counter.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ) + 1; + } + /// Postincrement + size_t operator ++(int) CDS_NOEXCEPT + { + return m_counter.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + } + + /// Predecrement + size_t operator --() CDS_NOEXCEPT + { + return m_counter.fetch_sub( 1, CDS_ATOMIC::memory_order_relaxed ) - 1; + } + /// Postdecrement + size_t operator --(int) CDS_NOEXCEPT + { + return m_counter.fetch_sub( 1, CDS_ATOMIC::memory_order_relaxed ); + } + + /// Get current value of the counter + size_t get() const CDS_NOEXCEPT + { + return m_counter.load( CDS_ATOMIC::memory_order_relaxed ); + } + + /// Resets the counter to 0 + void reset() CDS_NOEXCEPT + { + m_counter.store( 0, CDS_ATOMIC::memory_order_release ); + } + + }; + + /// Atomic item counter + /** + This class is simplified interface around std::atomic_size_t. + The class supports getting of current value of the counter and increment/decrement its value. 
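+
+            A short usage sketch:
+            \code
+            cds::atomicity::item_counter cnt;
+            ++cnt;                          // atomic increment, relaxed ordering by default
+            size_t n = cnt;                 // implicit conversion returns current value
+            cnt.reset();
+            \endcode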
+ */ + class item_counter + { + public: + typedef CDS_ATOMIC::atomic_size_t atomic_type ; ///< atomic type used + typedef size_t counter_type ; ///< Integral item counter type (size_t) + + private: + //@cond + atomic_type m_Counter ; ///< Atomic item counter + //@endcond + + public: + /// Default ctor initializes the counter to zero. + item_counter() + : m_Counter(counter_type(0)) + {} + + /// Returns current value of the counter + counter_type value(CDS_ATOMIC::memory_order order = CDS_ATOMIC::memory_order_relaxed) const + { + return m_Counter.load( order ); + } + + /// Same as \ref value() with relaxed memory ordering + operator counter_type() const + { + return value(); + } + + /// Returns underlying atomic interface + atomic_type& getAtomic() + { + return m_Counter; + } + + /// Returns underlying atomic interface (const) + const atomic_type& getAtomic() const + { + return m_Counter; + } + + /// Increments the counter. Semantics: postincrement + counter_type inc(CDS_ATOMIC::memory_order order = CDS_ATOMIC::memory_order_relaxed ) + { + return m_Counter.fetch_add( 1, order ); + } + + /// Decrements the counter. Semantics: postdecrement + counter_type dec(CDS_ATOMIC::memory_order order = CDS_ATOMIC::memory_order_relaxed) + { + return m_Counter.fetch_sub( 1, order ); + } + + /// Preincrement + counter_type operator ++() + { + return inc() + 1; + } + /// Postincrement + counter_type operator ++(int) + { + return inc(); + } + + /// Predecrement + counter_type operator --() + { + return dec() - 1; + } + /// Postdecrement + counter_type operator --(int) + { + return dec(); + } + + /// Resets count to 0 + void reset(CDS_ATOMIC::memory_order order = CDS_ATOMIC::memory_order_relaxed) + { + m_Counter.store( 0, order ); + } + }; + + /// Empty item counter + /** + This class may be used instead of \ref item_counter when you do not need full \ref item_counter interface. + All methods of the class is empty and returns 0. + + The object of this class should not be used in data structure that behavior significantly depends on item counting + (for example, in many hash map implementation). + */ + class empty_item_counter { + public: + typedef size_t counter_type ; ///< Counter type + public: + /// Returns 0 + counter_type value(CDS_ATOMIC::memory_order /*order*/ = CDS_ATOMIC::memory_order_relaxed) const + { + return 0; + } + + /// Same as \ref value(), always returns 0. + operator counter_type() const + { + return value(); + } + + /// Dummy increment. Always returns 0 + size_t inc(CDS_ATOMIC::memory_order /*order*/ = CDS_ATOMIC::memory_order_relaxed) + { + return 0; + } + + /// Dummy increment. Always returns 0 + size_t dec(CDS_ATOMIC::memory_order /*order*/ = CDS_ATOMIC::memory_order_relaxed) + { + return 0; + } + + /// Dummy pre-increment. Always returns 0 + size_t operator ++() + { + return 0; + } + /// Dummy post-increment. Always returns 0 + size_t operator ++(int) + { + return 0; + } + + /// Dummy pre-decrement. Always returns 0 + size_t operator --() + { + return 0; + } + /// Dummy post-decrement. 
Always returns 0 + size_t operator --(int) + { + return 0; + } + + /// Dummy function + void reset(CDS_ATOMIC::memory_order /*order*/ = CDS_ATOMIC::memory_order_relaxed) + {} + }; + + + } // namespace atomicity + +} // namespace cds + +#endif // #ifndef __CDS_CXX11_ATOMIC_H diff --git a/cds/details/aligned_allocator.h b/cds/details/aligned_allocator.h new file mode 100644 index 00000000..85ff72a4 --- /dev/null +++ b/cds/details/aligned_allocator.h @@ -0,0 +1,158 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_ALIGNED_ALLOCATOR_H +#define __CDS_DETAILS_ALIGNED_ALLOCATOR_H + +#include +#include + +namespace cds { namespace details { + + /// Allocator for aligned data + /** + The class is the wrapper around user-defined aligned allocator. + Template parameters: + \li \p T is a type to allocate + \li \p ALIGNED_ALLOCATOR is an aligned allocator implementation. Default implementation is defined by macro + CDS_DEFAULT_ALIGNED_ALLOCATOR from cds/user_setup/allocator.h header file. + + The \p nAlign parameter of member function specifyes desired aligment of data allocated. + + \par Note + When an array allocation is performed the allocator guarantees the alignment for first element of array only. + To guarantee the alignment for each element of the array the size of type \p T must be multiple of \p nAlign: + \code + sizeof(T) % nAlign == 0 + \endcode + */ + template < + typename T + , typename ALIGNED_ALLOCATOR = CDS_DEFAULT_ALIGNED_ALLOCATOR + > + class AlignedAllocator: public ALIGNED_ALLOCATOR::template rebind::other + { + public: + /// Underlying aligned allocator type + typedef typename ALIGNED_ALLOCATOR::template rebind::other allocator_type; + +# ifdef CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT + /// Analogue of operator new T(\p src... ) + template + T * New( size_t nAlign, const S&... src ) + { + return Construct( allocator_type::allocate( nAlign, 1), src... ); + } +# else + //@cond + /// Analogue of operator new T + T * New( size_t nAlign ) + { + return Construct( allocator_type::allocate(nAlign, 1) ); + } + + /// Analogue of operator new T(\p src ) + template + T * New( size_t nAlign, const S& src ) + { + return Construct( allocator_type::allocate( nAlign, 1), src ); + } + + /// Analogue of operator new T( \p s1, \p s2 ) + template + T * New( size_t nAlign, const S1& s1, const S2& s2 ) + { + return Construct( allocator_type::allocate( nAlign, 1 ), s1, s2 ); + } + + /// Analogue of operator new T( \p s1, \p s2, \p s3 ) + template + T * New( size_t nAlign, const S1& s1, const S2& s2, const S3& s3 ) + { + return Construct( allocator_type::allocate(nAlign, 1), s1, s2, s3 ); + } + //@endcond +# endif + + /// Analogue of operator new T[\p nCount ] + T * NewArray( size_t nAlign, size_t nCount ) + { + T * p = allocator_type::allocate( nAlign, nCount ); + for ( size_t i = 0; i < nCount; ++i ) + Construct( p + i ); + return p; + } + + /// Analogue of operator new T[\p nCount ]. + /** + Each item of array of type T is initialized by parameter \p src. 
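+
+            A sketch (the alignment value is illustrative):
+            \code
+            cds::details::AlignedAllocator< double > a;
+            double * arr = a.NewArray( 16, 8, 0.0 );    // 8 doubles, the first element is 16-byte aligned
+            // ...
+            a.Delete( arr, 8 );
+            \endcode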
+ */ + template + T * NewArray( size_t nAlign, size_t nCount, const S& src ) + { + T * p = allocator_type::allocate( nAlign, nCount ); + for ( size_t i = 0; i < nCount; ++i ) + Construct( p + i, src ); + return p; + } + + /// Analogue of operator delete + void Delete( T * p ) + { + allocator_type::destroy( p ); + allocator_type::deallocate( p, 1 ); + } + + /// Analogue of operator delete [] + void Delete( T * p, size_t nCount ) + { + for ( size_t i = 0; i < nCount; ++i ) + allocator_type::destroy( p + i ); + allocator_type::deallocate( p, nCount ); + } + +# ifdef CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT + /// Analogue of placement operator new( \p p ) T( \p src... ) + template + T * Construct( void * p, const S&... src ) + { + return new( p ) T( src... ); + } +# else + /// Analogue of placement operator new( \p p ) T + T * Construct( void * p ) + { + return new( p ) T; + } + + /// Analogue of placement operator new( \p p ) T( \p src ) + template + T * Construct( void * p, const S& src ) + { + return new( p ) T( src ); + } + + /// Analogue of placement operator new( \p p ) T( \p s1, \p s2 ) + template + T * Construct( void * p, const S1& s1, const S2& s2 ) + { + return new( p ) T( s1, s2 ); + } + + /// Analogue of placement operator new( \p p ) T( \p s1, \p s2, \p s3 ) + template + T * Construct( void * p, const S1& s1, const S2& s2, const S3& s3 ) + { + return new( p ) T( s1, s2, s3 ); + } +# endif + + /// Rebinds allocator to other type \p Q instead of \p T + template + struct rebind { + typedef AlignedAllocator< Q, typename ALIGNED_ALLOCATOR::template rebind::other > other ; ///< Rebinding result + }; + }; + +}} // namespace cds::details + +#endif // #ifndef __CDS_DETAILS_ALIGNED_ALLOCATOR_H diff --git a/cds/details/aligned_type.h b/cds/details/aligned_type.h new file mode 100644 index 00000000..0600827a --- /dev/null +++ b/cds/details/aligned_type.h @@ -0,0 +1,80 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_ALIGNED_TYPE_H +#define __CDS_DETAILS_ALIGNED_TYPE_H + +#include + +namespace cds { namespace details { + + /// Aligned type + /** + This meta-algorithm solves compiler problem when you need to declare a type \p T with alignment + equal to another type alignment. For example, the following declaration produces an error in Microsoft Visual Studio 2008 compiler: + \code + typedef double my_double; + typedef __declspec(align( __alignof(my_double) )) int aligned_int; + \endcode + In MS VS, the __declspec(align(N)) construction requires that N must be a integer constant (1, 2, 4 and so on) + but not an integer constant expression. + + The result of this meta-algo is a type \p aligned_type::type that is \p T aligned by \p Alignment. + For example, with \p aligned_type the prevoius example will not generate an error: + \code + typedef double my_double; + typedef typename cds::details::aligned_type::type aligned_int; + \endcode + and result of this declaration is equivalent to + \code + typedef __declspec(align(8)) int aligned_int; + \endcode + + The \p Alignment template parameter must be a constant expression and its result must be power of two. + The maximum of its value is 1024. 
+ + See also \ref align_as + */ + template + struct aligned_type +#ifdef CDS_DOXYGEN_INVOKED + {} +#endif +; + + //@cond none +# define CDS_ALIGNED_TYPE_impl(nAlign) template struct aligned_type { typedef CDS_TYPE_ALIGNMENT(nAlign) T type; } + CDS_ALIGNED_TYPE_impl(1); + CDS_ALIGNED_TYPE_impl(2); + CDS_ALIGNED_TYPE_impl(4); + CDS_ALIGNED_TYPE_impl(8); + CDS_ALIGNED_TYPE_impl(16); + CDS_ALIGNED_TYPE_impl(32); + CDS_ALIGNED_TYPE_impl(64); + CDS_ALIGNED_TYPE_impl(128); + CDS_ALIGNED_TYPE_impl(256); + CDS_ALIGNED_TYPE_impl(512); + CDS_ALIGNED_TYPE_impl(1024); +# undef CDS_ALIGNED_TYPE_impl + //@endcond + + /** Alignment by example + + This meta-algo is similar to \ref aligned_type . + + For example, the following code + \code + typedef typename cds::details::align_as::type aligned_int; + \endcode + declares type \p aligned_int which is \p int aligned like \p double. + + See also: \ref aligned_type + */ + template + struct align_as { + /// Result of meta-algo: type \p T aligned like type \p AlignAs + typedef typename aligned_type::type type; + }; + +}} // namespace cds::details + +#endif // #ifndef __CDS_DETAILS_ALIGNED_TYPE_H diff --git a/cds/details/allocator.h b/cds/details/allocator.h new file mode 100644 index 00000000..7070c1fb --- /dev/null +++ b/cds/details/allocator.h @@ -0,0 +1,307 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_ALLOCATOR_H +#define __CDS_DETAILS_ALLOCATOR_H + +/* + Allocator class for the library. Supports allocating and constructing of objects + + Editions: + 2008.03.08 Maxim.Khiszinsky Created +*/ + +#include +#include +#include +#include +#include + +namespace cds { + namespace details { + + /// Extends \p std::allocator interface to provide semantics like operator \p new and \p delete + /** + The class is the wrapper around underlying \p Alloc class. + \p Alloc provides the \p std::allocator interface. + */ + template + class Allocator + : public std::conditional< + std::is_same< T, typename Alloc::value_type>::value + , Alloc + , typename Alloc::template rebind::other + >::type + { + public: + /// Underlying allocator type + typedef typename std::conditional< + std::is_same< T, typename Alloc::value_type>::value + , Alloc + , typename Alloc::template rebind::other + >::type allocator_type; + + /// Element type + typedef T value_type; + +# ifdef CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT + /// Analogue of operator new T(\p src... ) + template + value_type * New( S const&... src ) + { + return Construct( allocator_type::allocate(1), src... ); + } +# else + //@cond + /// Analogue of operator new T + value_type * New() + { + return Construct( allocator_type::allocate(1) ); + } + + /// Analogue of operator new T(\p src ) + template + value_type * New( S const& src ) + { + return Construct( allocator_type::allocate(1), src ); + } + + /// Analogue of operator new T( \p s1, \p s2 ) + template + value_type * New( S1 const& s1, S2 const& s2 ) + { + return Construct( allocator_type::allocate(1), s1, s2 ); + } + + /// Analogue of operator new T( \p s1, \p s2, \p s3 ) + template + value_type * New( S1 const& s1, S2 const& s2, S3 const& s3 ) + { + return Construct( allocator_type::allocate(1), s1, s2, s3 ); + } + //@endcond +# endif + +# ifdef CDS_EMPLACE_SUPPORT + /// Analogue of operator new T( std::forward(args)... ) (move semantics) + /** + This function is available only for compiler that supports + variadic template and move semantics + */ + template + value_type * MoveNew( Args&&... args ) + { + return MoveConstruct( allocator_type::allocate(1), std::forward(args)... 
); + } +# endif + + + /// Analogue of operator new T[\p nCount ] + value_type * NewArray( size_t nCount ) + { + value_type * p = allocator_type::allocate( nCount ); + for ( size_t i = 0; i < nCount; ++i ) + Construct( p + i ); + return p; + } + + /// Analogue of operator new T[\p nCount ]. + /** + Each item of array of type T is initialized by parameter \p src: T( src ) + */ + template + value_type * NewArray( size_t nCount, S const& src ) + { + value_type * p = allocator_type::allocate( nCount ); + for ( size_t i = 0; i < nCount; ++i ) + Construct( p + i, src ); + return p; + } + +# ifdef CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT +# if CDS_COMPILER == CDS_COMPILER_INTEL + //@cond + value_type * NewBlock( size_t nSize ) + { + return Construct( heap_alloc( nSize )); + } + //@endcond +# endif + /// Allocates block of memory of size at least \p nSize bytes. + /** + Internally, the block is allocated as an array of \p void* pointers, + then \p Construct() method is called to initialize \p T. + + Precondition: nSize >= sizeof(T) + */ + template + value_type * NewBlock( size_t nSize, S const&... src ) + { + return Construct( heap_alloc( nSize ), src... ); + } +# else + //@cond + value_type * NewBlock( size_t nSize ) + { + return Construct( heap_alloc( nSize )); + } + template + value_type * NewBlock( size_t nSize, S const& arg ) + { + return Construct( heap_alloc( nSize ), arg ); + } + template + value_type * NewBlock( size_t nSize, S1 const& arg1, S2 const& arg2 ) + { + return Construct( heap_alloc( nSize ), arg1, arg2 ); + } + template + value_type * NewBlock( size_t nSize, S1 const& arg1, S2 const& arg2, S3 const& arg3 ) + { + return Construct( heap_alloc( nSize ), arg1, arg2, arg3 ); + } + //@endcond +# endif + + /// Analogue of operator delete + void Delete( value_type * p ) + { + allocator_type::destroy( p ); + allocator_type::deallocate( p, 1 ); + } + + /// Analogue of operator delete [] + void Delete( value_type * p, size_t nCount ) + { + for ( size_t i = 0; i < nCount; ++i ) + allocator_type::destroy( p + i ); + allocator_type::deallocate( p, nCount ); + } + +# ifdef CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT +# if CDS_COMPILER == CDS_COMPILER_INTEL + //@cond + value_type * Construct( void * p ) + { + return new( p ) value_type; + } + //@endcond +# endif + /// Analogue of placement operator new( \p p ) T( src... ) + template + value_type * Construct( void * p, S const&... src ) + { + return new( p ) value_type( src... ); + } +# else + //@cond + /// Analogue of placement operator new( \p p ) T + value_type * Construct( void * p ) + { + return new( p ) value_type; + } + + + /// Analogue of placement operator new( \p p ) T( \p src ) + template + value_type * Construct( void * p, S const& src ) + { + return new( p ) value_type( src ); + } + + /// Analogue of placement operator new( \p p ) T( \p s1, \p s2 ) + template + value_type * Construct( void * p, S1 const& s1, S2 const& s2 ) + { + return new( p ) value_type( s1, s2 ); + } + + /// Analogue of placement operator new( \p p ) T( \p s1, \p s2, \p s3 ) + template + value_type * Construct( void * p, S1 const& s1, S2 const& s2, S3 const& s3 ) + { + return new( p ) value_type( s1, s2, s3 ); + } + //@endcond +# endif + +# ifdef CDS_EMPLACE_SUPPORT + /// Analogue of placement operator new( p ) T( std::forward(args)... ) + /** + This function is available only for compiler that supports + variadic template and move semantics + */ + template + value_type * MoveConstruct( void * p, Args&&... args ) + { + return new( p ) value_type( std::forward(args)... 
); + } +# endif + + /// Rebinds allocator to other type \p Q instead of \p T + template + struct rebind { + typedef Allocator< Q, typename Alloc::template rebind::other > other ; ///< Rebinding result + }; + + private: + //@cond + void * heap_alloc( size_t nByteSize ) + { + assert( nByteSize >= sizeof(value_type)); + + size_t const nPtrSize = ( nByteSize + sizeof(void *) - 1 ) / sizeof(void *); + typedef typename allocator_type::template rebind< void * >::other void_allocator; + return void_allocator().allocate( nPtrSize ); + } + //@endcond + }; + + //@cond + namespace { + template + static inline void impl_call_dtor(T* p, boost::false_type const&) + { + p->T::~T(); + } + + template + static inline void impl_call_dtor(T* p, boost::true_type const&) + {} + } + //@endcond + + /// Helper function to call destructor of type T + /** + This function is empty for the type T that has trivial destructor. + */ + template + static inline void call_dtor( T* p ) + { + impl_call_dtor( p, ::boost::has_trivial_destructor() ); + } + + + /// Deferral removing of the object of type \p T. Helper class + template + struct deferral_deleter { + typedef T type ; ///< Type + typedef Alloc allocator_type ; ///< Allocator for removing + + /// Frees the object \p p + /** + Caveats: this function uses temporary object of type \ref cds::details::Allocator to free the node \p p. + So, the node allocator should be stateless. It is standard requirement for \p std::allocator class objects. + + Do not use this function directly. + */ + static void free( T * p ) + { + Allocator a; + a.Delete( p ); + } + }; + + } // namespace details +} // namespace cds + +#endif // #ifndef __CDS_DETAILS_ALLOCATOR_H diff --git a/cds/details/binary_functor_wrapper.h b/cds/details/binary_functor_wrapper.h new file mode 100644 index 00000000..1acbb2a5 --- /dev/null +++ b/cds/details/binary_functor_wrapper.h @@ -0,0 +1,62 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_BINARY_FUNCTOR_WRAPPER_H +#define __CDS_DETAILS_BINARY_FUNCTOR_WRAPPER_H + +#include + +//@cond +namespace cds { namespace details { + + template + struct binary_functor_wrapper { + typedef ReturnType return_type; + typedef Functor functor_type; + typedef ArgType argument_type; + typedef Accessor accessor; + + return_type operator()( argument_type const& a1, argument_type const& a2 ) const + { + return functor_type()( accessor()( a1 ), accessor()( a2 )); + } + + template + return_type operator()( argument_type const& a, Q const& q ) const + { + return functor_type()( accessor()(a), q ); + } + + template + return_type operator()( Q const& q, argument_type const& a ) const + { + return functor_type()( q, accessor()(a)); + } + + template + return_type operator()( Q1 const& q1, Q2 const& q2 ) const + { + return functor_type()( q1, q2 ); + } + }; + +#ifdef CDS_CXX11_TEMPLATE_ALIAS_SUPPORT + template + using predicate_wrapper = binary_functor_wrapper< bool, Predicate, ArgType, Accessor>; + + template + using compare_wrapper = binary_functor_wrapper< int, Compare, ArgType, Accessor>; +#else + template + struct predicate_wrapper: public binary_functor_wrapper< bool, Predicate, ArgType, Accessor> + {}; + + template + struct compare_wrapper: public binary_functor_wrapper< int, Compare, ArgType, Accessor > + {}; +#endif + +}} // namespace cds::details + +//@endcond + +#endif // #ifndef __CDS_DETAILS_BINARY_FUNCTOR_WRAPPER_H diff --git a/cds/details/bit_reverse_counter.h b/cds/details/bit_reverse_counter.h new file mode 100644 index 00000000..5f0df6ee --- /dev/null +++ 
b/cds/details/bit_reverse_counter.h @@ -0,0 +1,73 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_BIT_REVERSE_COUNTER_H +#define __CDS_DETAILS_BIT_REVERSE_COUNTER_H + +#include + +//@cond +namespace cds { namespace bitop { + + template + class bit_reverse_counter + { + public: + typedef Counter counter_type; + + private: + counter_type m_nCounter; + counter_type m_nReversed; + int m_nHighBit; + + public: + bit_reverse_counter() + : m_nCounter(0) + , m_nReversed(0) + , m_nHighBit(-1) + {} + + counter_type inc() + { + ++m_nCounter; + int nBit; + for ( nBit = m_nHighBit - 1; nBit >= 0; --nBit ) { + if ( !cds::bitop::complement( m_nReversed, nBit )) + break; + } + if ( nBit < 0 ) { + m_nReversed = m_nCounter; + ++m_nHighBit; + } + return m_nReversed; + } + + counter_type dec() + { + --m_nCounter; + int nBit; + for ( nBit = m_nHighBit - 1; nBit >= 0; --nBit ) { + if ( cds::bitop::complement( m_nReversed, nBit ) ) + break; + } + if ( nBit < 0 ) { + m_nReversed = m_nCounter; + --m_nHighBit; + } + return m_nReversed; + } + + counter_type value() const + { + return m_nCounter; + } + + counter_type reversed_value() const + { + return m_nReversed; + } + }; + +}} // namespace cds::bitop +//@endcond + +#endif // #ifndef __CDS_DETAILS_BIT_REVERSE_COUNTER_H diff --git a/cds/details/bitop_generic.h b/cds/details/bitop_generic.h new file mode 100644 index 00000000..8fed30bd --- /dev/null +++ b/cds/details/bitop_generic.h @@ -0,0 +1,271 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_BITOP_GENERIC_H +#define __CDS_DETAILS_BITOP_GENERIC_H + +#include // rand() +namespace cds { + namespace bitop { namespace platform { + // Return true if x = 2 ** k, k >= 0 +#ifndef cds_bitop_isPow2_32_DEFINED + static inline bool isPow2_32( atomic32u_t x ) + { + return (x & ( x - 1 )) == 0 && x; + } +#endif + +#ifndef cds_bitop_isPow2_64_DEFINED + static inline bool isPow2_64( atomic64_unaligned x ) + { + return (x & ( x - 1 )) == 0 && x; + } +#endif + + //*************************************************** + // Most significant bit number (1..N) + // Return 0 if x == 0 + // +#ifndef cds_bitop_msb32_DEFINED + // Return number (1..32) of most significant bit + // Return 0 if x == 0 + // Source: Linux kernel + static inline int msb32( atomic32u_t x ) + { + int r = 32; + + if (!x) + return 0; + if (!(x & 0xffff0000u)) { + x <<= 16; + r -= 16; + } + if (!(x & 0xff000000u)) { + x <<= 8; + r -= 8; + } + if (!(x & 0xf0000000u)) { + x <<= 4; + r -= 4; + } + if (!(x & 0xc0000000u)) { + x <<= 2; + r -= 2; + } + if (!(x & 0x80000000u)) { + x <<= 1; + r -= 1; + } + return r; + } +#endif + +#ifndef cds_bitop_msb32nz_DEFINED + static inline int msb32nz( atomic32u_t x ) + { + return msb32( x ) - 1; + } +#endif + +#ifndef cds_bitop_msb64_DEFINED + static inline int msb64( atomic64u_unaligned x ) + { + atomic32u_t h = (atomic32u_t) (x >> 32); + if ( h ) + return msb32( h ) + 32; + return msb32( (atomic32u_t) x ); + } +#endif + +#ifndef cds_bitop_msb64nz_DEFINED + static inline int msb64nz( atomic64u_unaligned x ) + { + return msb64( x ) - 1; + } +#endif + + //*************************************************** + // Least significant bit number (1..N) + // Return 0 if x == 0 + // +#ifndef cds_bitop_lsb32_DEFINED + // Return number (1..32) of least significant bit + // Return 0 if x == 0 + // Source: Linux kernel + static inline int lsb32( atomic32u_t x ) + { + int r = 1; + + if (!x) + return 0; + if (!(x & 0xffff)) { + x >>= 16; + r += 16; + } + if (!(x & 0xff)) { + x >>= 8; + r += 8; + } + if (!(x & 0xf)) { + x >>= 4; + r += 4; + } + if (!(x & 
3)) { + x >>= 2; + r += 2; + } + if (!(x & 1)) { + x >>= 1; + r += 1; + } + return r; + } +#endif + +#ifndef cds_bitop_lsb32nz_DEFINED + static inline int lsb32nz( atomic32u_t x ) + { + return lsb32( x ) - 1; + } +#endif + +#ifndef cds_bitop_lsb64_DEFINED + static inline int lsb64( atomic64u_unaligned x ) + { + if ( !x ) + return 0; + if ( x & 0xffffffffu ) + return lsb32( (atomic32u_t) x ); + return lsb32( (atomic32u_t) (x >> 32) ) + 32; + } +#endif + +#ifndef cds_bitop_lsb64nz_DEFINED + static inline int lsb64nz( atomic64u_unaligned x ) + { + return lsb64( x ) - 1; + } +#endif + + //****************************************************** + // Reverse bit order + //****************************************************** +#ifndef cds_bitop_rbo32_DEFINED + static inline atomic32u_t rbo32( atomic32u_t x ) + { + // swap odd and even bits + x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1); + // swap consecutive pairs + x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2); + // swap nibbles ... + x = ((x >> 4) & 0x0F0F0F0F) | ((x & 0x0F0F0F0F) << 4); + // swap bytes + x = ((x >> 8) & 0x00FF00FF) | ((x & 0x00FF00FF) << 8); + // swap 2-byte long pairs + return ( x >> 16 ) | ( x << 16 ); + } +#endif + +#ifndef cds_bitop_rbo64_DEFINED + static inline atomic64u_t rbo64( atomic64u_unaligned x ) + { + // Low 32bit Hight 32bit + return ( ((atomic64u_t) rbo32( (atomic32u_t) x )) << 32 ) | ((atomic64u_t) rbo32( (atomic32u_t) (x >> 32) )); + } +#endif + + //****************************************************** + // Set bit count. Return count of non-zero bits in word + //****************************************************** +#ifndef cds_bitop_sbc32_DEFINED + static inline int sbc32( atomic32u_t x ) + { +# ifdef cds_beans_zbc32_DEFINED + return 32 - zbc32( x ); +# else + // Algorithm from Sean Eron Anderson's great collection + x = x - ((x >> 1) & 0x55555555); + x = (x & 0x33333333) + ((x >> 2) & 0x33333333); + return (((x + (x >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24; +# endif + } +#endif + +#ifndef cds_bitop_sbc64_DEFINED + static inline int sbc64( atomic64u_unaligned x ) + { +# ifdef cds_beans_zbc64_DEFINED + return 64 - zbc64( x ); +# else + return sbc32( (atomic32u_t) (x >> 32) ) + sbc32( (atomic32u_t) x ); +# endif + } +#endif + + //****************************************************** + // Zero bit count. 
Return count of zero bits in word + //****************************************************** +#ifndef cds_bitop_zbc32_DEFINED + static inline int zbc32( atomic32u_t x ) + { + return 32 - sbc32( x ); + } +#endif + +#ifndef cds_bitop_zbc64_DEFINED + static inline int zbc64( atomic64u_unaligned x ) + { + return 64 - sbc64( x ); + } +#endif + + // Bit complement +#ifndef cds_bitop_complement32_DEFINED + static inline bool complement32( atomic32u_t * pArg, unsigned int nBit ) + { + assert( pArg != NULL ); + atomic32u_t nVal = *pArg & (1 << nBit); + *pArg ^= 1 << nBit; + return nVal != 0; + } +#endif + +#ifndef cds_bitop_complement64_DEFINED + static inline bool complement64( atomic64u_t * pArg, unsigned int nBit ) + { + assert( pArg != NULL ); + atomic64u_t nVal = *pArg & (atomic64u_t(1) << nBit); + *pArg ^= atomic64u_t(1) << nBit; + return nVal != 0; + } +#endif + + /* + Simple random number generator + Source: + [2003] George Marsaglia "Xorshift RNGs" + */ + static inline uint32_t RandXorShift32(uint32_t x) + { + //static atomic32u_t xRandom = 2463534242UL ; //rand() | 0x0100 ; // must be nonzero + //atomic32u_t x = xRandom; + if ( !x ) + x = ((rand() + 1) << 16) + rand() + 1; + x ^= x << 13; + x ^= x >> 15; + return x ^= x << 5; + } + + static inline uint64_t RandXorShift64(uint64_t x) + { + //static atomic64u_t xRandom = 88172645463325252LL; + //atomic64u_t x = xRandom; + if ( !x ) + x = 88172645463325252LL; + x ^= x << 13; + x ^= x >> 7; + return x ^= x << 17; + } + }} // namespace bitop::platform +} // namespace cds + +#endif // __CDS_DETAILS_BITOP_GENERIC_H diff --git a/cds/details/bounded_array.h b/cds/details/bounded_array.h new file mode 100644 index 00000000..97a783ad --- /dev/null +++ b/cds/details/bounded_array.h @@ -0,0 +1,231 @@ +//$$CDS-header$$ + +#ifndef __CDS_IMPL_BOUNDED_ARRAY_H +#define __CDS_IMPL_BOUNDED_ARRAY_H + +/* + Dynamic non-growing array + + Editions: + 2008.03.08 Maxim.Khiszinsky Created +*/ + +#include +#include +#include + +//@cond +namespace cds { + namespace details { + + /// Upper bounded dynamic array + /** + BoundedArray is dynamic allocated C-array of item of type T with the interface like STL. + The max size (capacity) of array is defined at ctor time and cannot be changed during object's lifetime + + \par Template parameters + - \p T type of elements + - \p Allocator dynamic memory allocator class (std::allocator semantics) + + This class is deprecated: it is based on std::vector and does not support non-copyable type \p T. + See \ref bounded_array. + */ + template + class BoundedArray: private std::vector< T, typename Allocator::template rebind::other > + { + public: + typedef T value_type ; ///< value type stored in the array + typedef Allocator allocator_type ; ///< allocator type + typedef std::vector::other> vector_type ; ///< underlying vector type + + typedef typename vector_type::iterator iterator ; ///< item iterator + typedef typename vector_type::const_iterator const_iterator ; ///< item const iterator + + public: + /// Default ctor + explicit BoundedArray( + size_t nCapacity ///< capacity + ) + { + vector_type::resize( nCapacity ); + assert( size() == capacity() ); + } + + /// Ctor with item's initialization + BoundedArray( + size_t nCapacity, ///< capacity of array + const value_type& init, ///< initial value of any item + size_t nInitCount = 0 ///< how many items will be initialized; 0 - all items + ) + { + assert( nInitCount <= nCapacity ); + vector_type::resize( nCapacity ); + assign( nInitCount ? 
nInitCount : vector_type::capacity(), init ); + assert( size() == capacity() ); + } + + const value_type& operator []( size_t nItem ) const + { + return vector_type::operator[](nItem); + } + + value_type& operator []( size_t nItem ) + { + return vector_type::operator[](nItem); + } + + size_t size() const + { + return vector_type::size(); + } + + size_t capacity() const + { + return vector_type::capacity(); + } + + /// Returns sizeof(T) + static size_t itemSize() + { + return sizeof(T); + } + + /// Returns pointer to the first item in the array + value_type * top() + { + return & vector_type::front(); + } + + friend value_type * operator +( BoundedArray& arr, size_t i ) + { + return &( arr[i] ); + } + + /// Get begin iterator + const_iterator begin() const + { + return vector_type::begin(); + } + iterator begin() + { + return vector_type::begin(); + } + + /// Get end iterator + const_iterator end() const + { + return vector_type::end(); + } + iterator end() + { + return vector_type::end(); + } + + /// Get end iterator for \p nMax-th item + const_iterator end( size_t nMax ) const + { + assert( nMax <= vector_type::capacity()); + return vector_type::begin() + nMax; + } + iterator end( size_t nMax ) + { + assert( nMax <= vector_type::capacity()); + return vector_type::begin() + nMax; + } + }; + + /// Bounded dynamic array + /** + The class template is intended for storing fixed-size sequences of objects. + Array capacity is constant and cannot be changed after creation of object of the class. + It is suitable for managing objects of non-copyable type \p T. + + \par Template parameters + - \p T type of elements + - \p Allocator dynamic memory allocator class (std::allocator semantics) + + */ + template + class bounded_array + { + public: + typedef T value_type ; ///< value type stored in the array + typedef Allocator allocator_type ; ///< allocator type + + typedef value_type * iterator ; ///< item iterator + typedef value_type const * const_iterator ; ///< item const iterator + + private: + typedef cds::details::Allocator< T, allocator_type> allocator_impl; + + value_type * m_arr; + const size_t m_nCapacity; + + public: + /// Default ctor + explicit bounded_array( + size_t nCapacity ///< capacity + ) + : m_arr( allocator_impl().NewArray( nCapacity ) ) + , m_nCapacity( nCapacity ) + {} + + ~bounded_array() + { + allocator_impl().Delete( m_arr, capacity() ); + } + + const value_type& operator []( size_t nItem ) const + { + assert( nItem < capacity() ); + return m_arr[nItem]; + } + + value_type& operator []( size_t nItem ) + { + assert( nItem < capacity() ); + return m_arr[nItem]; + } + + size_t size() const CDS_NOEXCEPT + { + return capacity(); + } + + size_t capacity() const CDS_NOEXCEPT + { + return m_nCapacity; + } + + /// Returns pointer to the first item in the array + value_type * top() + { + return m_arr; + } + + /// Get begin iterator + const_iterator begin() const CDS_NOEXCEPT + { + return m_arr; + } + iterator begin() CDS_NOEXCEPT + { + return m_arr; + } + + /// Get end iterator + const_iterator end() const CDS_NOEXCEPT + { + return begin() + capacity(); + } + iterator end() CDS_NOEXCEPT + { + return begin() + capacity(); + } + }; + + } // namespace details +} // namespace cds +//@endcond + +#endif // #ifndef __CDS_IMPL_BOUNDED_ARRAY_H diff --git a/cds/details/bounded_container.h b/cds/details/bounded_container.h new file mode 100644 index 00000000..24fac0ad --- /dev/null +++ b/cds/details/bounded_container.h @@ -0,0 +1,15 @@ +//$$CDS-header$$ + +#ifndef __CDS_BOUNDED_CONTAINER_H 
+#define __CDS_BOUNDED_CONTAINER_H + +namespace cds { + /// Bounded container + /** + If a container has upper limit of item then it should be based on bounded_container class. + Example of those containers: cyclic queue (cds::container::TsigasCycleQueue) + */ + struct bounded_container {}; +} // namespace cds + +#endif // __CDS_BOUNDED_CONTAINER_H diff --git a/cds/details/comparator.h b/cds/details/comparator.h new file mode 100644 index 00000000..3728f58d --- /dev/null +++ b/cds/details/comparator.h @@ -0,0 +1,61 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_COMPARATOR_H +#define __CDS_DETAILS_COMPARATOR_H + +#include +#include + +namespace cds { + /// Helper classes and functions + namespace details { + /// Comparator + /** + Comparator is a functor (a class with binary @a operator() ) that compares two values. + The comparator is based on \p std::less functor and returns the result + of comparing of two values: + \li -1 if a < b + \li 0 if a == b + \li 1 if a > b + */ + template + class Comparator { + //@cond + std::less< T > m_cmp; + //@endcond + public: + typedef T value_type ; ///< Type of values to compare + + /// Compare method + /** + @return -1 if @p p1 < @p p2 \n + 0 if @p p1 == @p p2 \n + 1 if @p p1 > @p p2 \n + */ + int operator()( const T& p1, const T& p2 ) const + { + if ( m_cmp( p1, p2 ) ) + return -1; + if ( m_cmp( p2, p1 )) + return 1; + return 0; + } + }; + + /// String specialization. It uses @a string::compare method + template <> + class Comparator< std::string > { + public: + //@cond + typedef std::string value_type ; ///< Type of values to compare + + int operator()( const std::string& str1, const std::string& str2 ) const + { + return str1.compare( str2 ); + } + //@endcond + }; + } // namespace details +} // namespace cds + +#endif // #ifndef __CDS_DETAILS_COMPARATOR_H diff --git a/cds/details/cxx11_features.h b/cds/details/cxx11_features.h new file mode 100644 index 00000000..cbafeb91 --- /dev/null +++ b/cds/details/cxx11_features.h @@ -0,0 +1,26 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_CXX11_FEATURES_H +#define __CDS_DETAILS_CXX11_FEATURES_H +//@cond + +#ifndef __CDS_DEFS_H +# error " cannot be included directly, use instead" +#endif + +// =delete function specifier +#ifdef CDS_CXX11_DELETE_DEFINITION_SUPPORT +# define CDS_DELETE_SPECIFIER =delete +#else +# define CDS_DELETE_SPECIFIER +#endif + +// =default function specifier +#ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT +# define CDS_DEFAULT_SPECIFIER =default +#else +# define CDS_DEFAULT_SPECIFIER +#endif + +//@endcond +#endif // #ifndef __CDS_DETAILS_CXX11_FEATURES_H diff --git a/cds/details/defs.h b/cds/details/defs.h new file mode 100644 index 00000000..f8a7e5f4 --- /dev/null +++ b/cds/details/defs.h @@ -0,0 +1,539 @@ +//$$CDS-header$$ + +#ifndef __CDS_DEFS_H +#define __CDS_DEFS_H + +#include +#include +#include +#include + +#include + +/** \mainpage CDS: Concurrent Data Structures library + + This library is a collection of lock-free and lock-based fine-grained algorithms of data structures + like maps, queues, list etc. The library contains implementation of well-known data structures + and memory reclamation schemas for modern processor architectures. 
+
+    Supported processor architectures and operating systems (OS) are:
+       - x86 [32bit] Linux, Windows, FreeBSD, MinGW
+       - amd64 (x86-64) [64bit] Linux, Windows, FreeBSD, MinGW
+       - ia64 (itanium) [64bit] Linux, HP-UX 11.23, HP-UX 11.31
+       - sparc [64bit] Sun Solaris
+       - Mac OS X amd64
+
+    Supported compilers:
+       - GCC 4.3+ - for the UNIX-like OSes
+       - Clang 3.0+ - for Linux
+       - MS Visual C++ 2008 and above - for MS Windows
+
+    For each lock-free data structure the \p CDS library presents several implementations based on published papers.
+    For example, there are several implementations of queue, differing in the memory reclamation scheme used.
+    However, all implementations of a given data structure type share a common interface.
+
+    To implement any lock-free data structure, two things are needed:
+       - an atomic operation library conforming to the C++11 memory model. The libcds has such a library,
+         see the cds::cxx11_atomics namespace for details and compiler-specific information.
+       - a safe memory reclamation (SMR) or garbage collecting (GC) algorithm. The libcds has implementations
+         of several well-known SMR algorithms, see below.
+
+    The key part of lock-free data structures is garbage collecting. The garbage collector (GC) solves the problem
+    of safe memory reclamation that is one of the main problems for lock-free programming.
+    The library contains the implementations of several light-weight \ref cds_garbage_collector "memory reclamation schemes":
+       - M.Michael's Hazard Pointer - see cds::gc::HP for more explanation
+       - Gidenstam's memory reclamation schema based on Hazard Pointer and reference counting - see cds::gc::HRC
+       - M.Herlihy and M.Moir's Pass The Buck algorithm - see cds::gc::PTB
+       - User-space Read-Copy Update (RCU) - see cds::urcu namespace
+       - there is cds::gc::nogc "GC" for containers that do not support item reclamation.
+
+    Many GCs require support from the threads that use them. The library does not define the threading model
+    you must use, it is developed to support various ones; see cds::threading for how to incorporate the cds
+    library into your threading model.
+
+    The main namespace for the library is \ref cds.
+    To see the full list of container classes go to the modules tab.
+
+    \par How to build
+
+    The cds library is mostly header-only. Only a small part of the library related to GC core functionality
+    must be compiled. The test projects depend on the following static libraries from \p boost:
+       - boost_thread
+       - boost_date_time
+
+    \par Windows build
+
+    Prerequisites: for building the cds library and test suite you need:
+       - perl installed; the \p PATH environment variable should contain the full path to the Perl binary.
+         Perl is used to generate a large dictionary for testing purposes;
+       - boost library 1.51 and above. You should create an environment variable \p BOOST_PATH containing
+         the full path to the \p boost root directory (for example, C:\\libs\\boost_1_47_0).
+
+    Open the solution file cds\projects\vcX\cds.sln where vcX is the version of Microsoft Visual C++ you use:
+    vc9 for MS VC 2008, vc10 for MS VC 2010 and so on. The solution contains the cds project and several
+    test projects. Just build the library using the solution.
+
+    Warning: the solution depends on the \p BOOST_PATH environment variable that specifies the full path
+    to the \p boost library root directory. The test projects search for \p boost libraries in:
+       - for 32bit: \$(BOOST_PATH)/stage/lib, \$(BOOST_PATH)/stage32/lib, and \$(BOOST_PATH)/bin.
+       - for 64bit: \$(BOOST_PATH)/stage64/lib and \$(BOOST_PATH)/bin.
+
+    \par *NIX build
+
+    For Unix-like systems the GCC and Clang compilers are supported.
+    Use GCC 4.3 (or above) or Clang 3.0 (or above) to build the cds library. The distribution contains
+    a makefile and a build.sh script in the build directory.
+    The build/sample directory contains sample scripts for different operating systems and
+    processor architectures.
+    The build.sh script supports the following options:
+       - -c toolset - Toolset name, possible values: gcc (default), clang
+       - -x compiler - C++ compiler name (e.g. g++, g++-4.5 and so on)
+       - -p arch - processor architecture; possible values for arch are: x86, amd64 (x86_64), sparc, ia64
+       - -o OStype - OS family; possible values for OStype are: linux, sunos (solaris), hpux
+       - -D define additional defines
+       - -b bits - bits to build, accepts '64', '32'
+       - -l "options" - extra linker options (in quotes)
+       - -z "options" - extra compiler options (in quotes)
+       - --with-boost path - path to boost include
+       - --debug-cxx-options "options" - extra compiler options for debug target
+       - --debug-ld-options "options" - extra linker options for debug target
+       - --release-cxx-options "options" - extra compiler options for release target
+       - --release-ld-options "options" - extra linker options for release target
+       - --clean - clean all before building
+       - --debug-test - make unit test in debug mode; by default a release unit test is generated
+       - --amd64-use-128bit - compile with support for 128bit (16byte) CAS on amd64 (for amd64 only)
+
+    Important for the GCC compiler: all your projects that use libcds must be compiled with the -fno-strict-aliasing
+    compiler flag. Also, the compiler option -std=c++0x is very useful.
+
+    \anchor cds_how_to_use
+    \par How to use
+
+    To use \p cds lock-free containers based on the garbage collectors (GC) provided by the library,
+    your application must be linked with \p libcds.
+
+    The main part of lock-free programming is garbage collecting for safe memory reclamation.
+    The library provides several types of GC schemes. One of the widely used and well-tested schemes is the
+    Hazard Pointer memory reclamation schema discovered by M. Michael and implemented in the library as the
+    cds::gc::HP class. Usually, the application is based on only one type of GC.
+
+    In the next example we assume that your application uses Hazard Pointer (cds::gc::HP) based containers.
+    Other GCs (cds::gc::HRC, cds::gc::PTB) are applied analogously.
+
+    First, in your code you should initialize the \p cds library and a garbage collector in the \p main function:
+    \code
+    #include    // for cds::Initialize and cds::Terminate
+    #include    // for cds::gc::HP (Hazard Pointer) garbage collector
+
+    int main(int argc, char** argv)
+    {
+        // Initialize libcds
+        cds::Initialize();
+
+        {
+            // Initialize Hazard Pointer singleton
+            cds::gc::HP hpGC;
+
+            // If the main thread uses lock-free containers
+            // the main thread should be attached to libcds infrastructure
+            cds::threading::Manager::attachThread();
+
+            // Now you can use HP-based containers in the main thread
+            //...
+        }
+
+        // Terminate libcds
+        cds::Terminate();
+    }
+    \endcode
+
+    Second, any of your threads that uses \p cds containers should be attached to the \p cds infrastructure.
+    \code
+    #include
+
+    int myThreadEntryPoint(void *)
+    {
+        // Attach the thread to libcds infrastructure
+        cds::threading::Manager::attachThread();
+
+        // Now you can use HP-based containers in the thread
+        //...
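+        // Illustrative sketch only (the container choice below is an assumption,
+        // not part of the original example): an HP-based Michael-Scott queue could
+        // be used at this point, e.g.
+        //    cds::container::MSQueue< cds::gc::HP, int > q;
+        //    q.enqueue( 42 );
+        //    int n;
+        //    q.dequeue( n );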
+
+        // Detach thread when terminating
+        cds::threading::Manager::detachThread();
+    }
+    \endcode
+
+    After that, you can use \p cds lock-free containers safely without any external synchronization.
+
+    In some cases, your code works in an external thread. For example, your application
+    is a plug-in for a server that calls your code in a thread that has been created by the server.
+    In this case, you should use the persistent mode of garbage collecting. In this mode, the thread attaches
+    to the GC singleton only if it is not attached yet and never calls detachThread():
+    \code
+    #include
+
+    int plugin_entry_point()
+    {
+        // Attach the thread if it is not attached yet
+        if ( !cds::threading::Manager::isThreadAttached() )
+            cds::threading::Manager::attachThread();
+
+        // Do some work with HP-related containers
+        ...
+    }
+    \endcode
+
+*/
+
+
+/// The main library namespace
+namespace cds {}
+
+/*
+    \brief Basic typedefs and defines
+
+    You do not need to include this header directly. All library header files depend on defs.h and include it.
+
+    Defines macros:
+
+    CDS_COMPILER        Compiler:
+        - CDS_COMPILER_MSVC     Microsoft Visual C++
+        - CDS_COMPILER_GCC      GNU C++
+        - CDS_COMPILER_CLANG    clang
+        - CDS_COMPILER_UNKNOWN  unknown compiler
+
+    CDS_COMPILER__NAME      The name (string) of the compiler
+
+    CDS_COMPILER_VERSION    Compiler version (number)
+
+    CDS_BUILD_BITS  Resulting binary code:
+        - 32        32bit
+        - 64        64bit
+        - -1        undefined
+
+    CDS_POW2_BITS   CDS_BUILD_BITS == 2**CDS_POW2_BITS
+
+    CDS_PROCESSOR_ARCH  The processor architecture:
+        - CDS_PROCESSOR_X86         Intel x86 (32bit)
+        - CDS_PROCESSOR_AMD64       Amd64, Intel x86-64 (64bit)
+        - CDS_PROCESSOR_IA64        Intel IA64 (Itanium)
+        - CDS_PROCESSOR_SPARC       Sparc
+        - CDS_PROCESSOR_PPC64       PowerPC64
+        - CDS_PROCESSOR_ARM7        ARM v7
+        - CDS_PROCESSOR_UNKNOWN     undefined processor architecture
+
+    CDS_PROCESSOR__NAME     The name (string) of processor architecture
+
+    CDS_OS_TYPE     Operating system type:
+        - CDS_OS_UNKNOWN        unknown OS
+        - CDS_OS_PTHREAD        unknown OS with pthread
+        - CDS_OS_WIN32          Windows 32bit
+        - CDS_OS_WIN64          Windows 64bit
+        - CDS_OS_LINUX          Linux
+        - CDS_OS_SUN_SOLARIS    Sun Solaris
+        - CDS_OS_HPUX           HP-UX
+        - CDS_OS_AIX            IBM AIX
+        - CDS_OS_BSD            FreeBSD, OpenBSD, NetBSD - common flag
+        - CDS_OS_FREE_BSD       FreeBSD
+        - CDS_OS_OPEN_BSD       OpenBSD
+        - CDS_OS_NET_BSD        NetBSD
+        - CDS_OS_MINGW          MinGW
+        - CDS_OS_OSX            Apple OS X
+
+    CDS_OS__NAME        The name (string) of operating system type
+
+    CDS_OS_INTERFACE    OS interface:
+        - CDS_OSI_UNIX      Unix (POSIX)
+        - CDS_OSI_WINDOWS   Windows
+
+    CDS_BUILD_TYPE  Build type: 'RELEASE' or 'DEBUG' string
+
+*/
+
+#if defined(_DEBUG) || !defined(NDEBUG)
+#   define CDS_DEBUG
+#   define CDS_BUILD_TYPE    "DEBUG"
+#else
+#   define CDS_BUILD_TYPE    "RELEASE"
+#endif
+
+/// Unused function argument
+#define CDS_UNUSED(x)   (void)(x)
+
+// Supported compilers:
+#define CDS_COMPILER_MSVC       1
+#define CDS_COMPILER_GCC        2
+#define CDS_COMPILER_INTEL      3
+#define CDS_COMPILER_CLANG      4
+#define CDS_COMPILER_UNKNOWN   -1
+
+// Supported processor architectures:
+#define CDS_PROCESSOR_X86       1
+#define CDS_PROCESSOR_IA64      2
+#define CDS_PROCESSOR_SPARC     3
+#define CDS_PROCESSOR_AMD64     4
+#define CDS_PROCESSOR_PPC64     5   // PowerPC 64bit
+#define CDS_PROCESSOR_ARM7      7
+#define CDS_PROCESSOR_UNKNOWN  -1
+
+// Supported OS interfaces
+#define CDS_OSI_UNKNOWN     0
+#define CDS_OSI_UNIX        1
+#define CDS_OSI_WINDOWS     2
+
+// Supported operating systems (value of CDS_OS_TYPE):
+#define CDS_OS_UNKNOWN     -1
+#define CDS_OS_WIN32        1
+#define CDS_OS_WIN64        5
+#define CDS_OS_LINUX        10
+#define CDS_OS_SUN_SOLARIS  20
+#define CDS_OS_HPUX         30
+#define 
CDS_OS_AIX 50 // IBM AIX +#define CDS_OS_FREE_BSD 61 +#define CDS_OS_OPEN_BSD 62 +#define CDS_OS_NET_BSD 63 +#define CDS_OS_MINGW 70 +#define CDS_OS_OSX 80 +#define CDS_OS_PTHREAD 100 + +#if defined(_MSC_VER) +# if defined(__ICL) || defined(__INTEL_COMPILER) +# define CDS_COMPILER CDS_COMPILER_INTEL +# else +# define CDS_COMPILER CDS_COMPILER_MSVC +# endif +#elif defined(__clang__) // Clang checking must be before GCC since Clang defines __GCC__ too +# define CDS_COMPILER CDS_COMPILER_CLANG +#elif defined( __GCC__ ) || defined(__GNUC__) +# if defined(__ICL) || defined(__INTEL_COMPILER) +# define CDS_COMPILER CDS_COMPILER_INTEL +# else +# define CDS_COMPILER CDS_COMPILER_GCC +# endif +#else +# define CDS_COMPILER CDS_COMPILER_UNKNOWN +#endif // Compiler choice + + +// CDS_VERIFY: Debug - assert(_expr); Release - _expr +#ifdef CDS_DEBUG +# define CDS_VERIFY( _expr ) assert( _expr ) +# define CDS_DEBUG_DO( _expr ) _expr +#else +# define CDS_VERIFY( _expr ) _expr +# define CDS_DEBUG_DO( _expr ) +#endif + +#ifdef CDS_STRICT +# define CDS_STRICT_DO(_expr) _expr +#else +# define CDS_STRICT_DO( _expr ) +#endif + + +// Compiler-specific defines +#include +// New C++11 features +#include + +#define CDS_NOEXCEPT CDS_NOEXCEPT_SUPPORT +#define CDS_NOEXCEPT_( expr ) CDS_NOEXCEPT_SUPPORT_( expr ) +#ifndef CDS_NOEXCEPT_DEFAULTED + // Some compilers do not allow noexcept specification in defaulted function + // For example, GCC 4.6.x raise following error: + // void foo() noexcept = default + // error: function ‘foo’ defaulted on its first declaration must not have an exception-specification + // For such compiler empty CDS_NOEXCEPT_DEFAULTED must be defined +# define CDS_NOEXCEPT_DEFAULTED CDS_NOEXCEPT +# define CDS_NOEXCEPT_DEFAULTED_(expr) CDS_NOEXCEPT_( expr ) +#endif + +#if defined(CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT) && defined(CDS_MOVE_SEMANTICS_SUPPORT) +# define CDS_EMPLACE_SUPPORT +#endif + +#ifdef CDS_CXX11_INLINE_NAMESPACE_SUPPORT +# define CDS_CXX11_INLINE_NAMESPACE inline +#else +# define CDS_CXX11_INLINE_NAMESPACE +#endif + +//@cond +// typedefs for back compatibility +namespace cds { + /// Atomic pointer + typedef void * pointer_t; + + /// 64bit unaligned int + typedef int64_t atomic64_unaligned; + + /// 64bit unaligned unsigned int + typedef uint64_t atomic64u_unaligned; + + /// 64bit aligned int + typedef atomic64_unaligned CDS_TYPE_ALIGNMENT(8) atomic64_aligned; + + /// 64bit aligned unsigned int + typedef atomic64u_unaligned CDS_TYPE_ALIGNMENT(8) atomic64u_aligned; + + /// 64bit atomic int (aligned) + typedef atomic64_aligned atomic64_t; + + /// 64bit atomic unsigned int (aligned) + typedef atomic64u_aligned atomic64u_t; + + /// 32bit atomic int + typedef int32_t atomic32_t; + + /// 32bit atomic unsigned int + typedef uint32_t atomic32u_t; + + /// atomic int + typedef atomic32_t atomic_t; + + /// atomic unsigned int + typedef atomic32u_t unsigned_atomic_t; + + /// atomic int sized as pointer + typedef intptr_t ptr_atomic_t; + + /// atomic unsigned int sized as pointer + typedef uintptr_t uptr_atomic_t; +} // namespace cds +//@endcond + +/************************************************************************* + Common things +**************************************************************************/ + +#include + +namespace cds { + + //@cond + /// Helper template: converts volatile pointer to non-volatile one + template + static inline T * non_volatile( T volatile * p ) { return const_cast( p ); } + + template + static inline T * non_volatile( T * p ) { return p; } + //@endcond + + 
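+    //@cond
+    // Usage sketch (illustrative only, not part of the original header) for the debug
+    // helper macros defined earlier in this file: CDS_VERIFY evaluates its expression in
+    // both debug and release builds, while CDS_DEBUG_DO compiles to nothing in release:
+    //
+    //    CDS_VERIFY( pthread_mutex_lock( &m ) == 0 );   // asserted in debug, still executed in release
+    //    CDS_DEBUG_DO( ++nDebugOnlyCounter );           // executed only when CDS_DEBUG is defined
+    //@endcond
+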
/// Base of all exceptions in the library + class Exception: public std::exception + { + protected: + std::string m_strMsg ; ///< Exception message + public: + /// Create empty exception + Exception() + {} + /// Create exception with message + explicit Exception( const char * pszMsg ) + : m_strMsg( pszMsg ) + {} + /// Create exception with message + explicit Exception( const std::string& strMsg ) + :m_strMsg( strMsg ) + {} + + /// Destructor + virtual ~Exception() throw() + {} + + /// Return exception message + virtual const char * what( ) const throw() + { + return m_strMsg.c_str(); + } + }; + +//@cond +# define CDS_PURE_VIRTUAL_FUNCTION_CALLED { assert(false); throw Exception("Pure virtual function called"); } +# define CDS_PURE_VIRTUAL_FUNCTION_CALLED_(method_name) { assert(false); throw Exception("Pure virtual function called " method_name ); } +//@endcond + + /// any_type is used as a placeholder for auto-calculated type (usually in \p rebind templates) + struct any_type {}; + + /** \def CDS_DECLARE_EXCEPTION( _class, _msg ) + Simplifying declaration of specific exception (usual within classes) + - @p _class - the class name of exception + - @p _msg - exception message (const char *) + */ +#define CDS_DECLARE_EXCEPTION( _class, _msg ) \ + struct _class: public std::exception { \ + public: \ + _class(): std::exception() {} \ + virtual const char * what( ) const throw() { return _msg; } \ + } + + + //@cond + // This template function should be replaced with nullptr keyword when all compilers will support it + template + static inline CDS_CONSTEXPR T null_ptr() CDS_NOEXCEPT + { + return reinterpret_cast( NULL ); + } + //@endcond + +} // namespace cds + + +/// @defgroup cds_cxx11_stdlib_wrapper New C++11 standard library support + +/// C++11 standard library wrapper namespace +/** @ingroup cds_cxx11_stdlib_wrapper + libcds needs support from new features defined in C++11 standard library. + In case when an old compiler and corresponding \p std library has no required feature + the \p boost library is used if possible. The \p %cds_std namespace is a wrapper for new C++11 stdlib classes: + - if the compiler supports new feature, this feature places (with \p using directive) into \p %cds_std namespace "as is" + - otherwise the \p boost analog is used and it places into \p %cds_std namespace too + + For example, for class \p std::mutex the \p libcds does the following: + - for old compiler: \code + #include + namespace cds_std { + using boost::mutex; + } + \endcode + - for C++11-ready compiler: \code + #include + namespace cds_std { + using std::mutex; + } + \endcode + + Everywhere in \p libcds the class \p %cds_std::mutex is used instead of \p std::mutex. + + Note, not all C++11 features are contained in \p %cds_std but only required by \p libcds. + + In future when all compilers will be C++11-ready we can transform the \p libcds + to new C++11 standard library changing \p %cds_std namespace to native \p std. 
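+
+    For illustration, a typical guarded critical section inside the library is written once against the
+    wrapper and works with either underlying implementation (the mutex object below is only an example,
+    not a specific library member):
+    \code
+    cds_std::mutex m;
+    {
+        cds_std::lock_guard<cds_std::mutex> lock( m );
+        // critical section
+    }
+    \endcode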
+*/ +namespace cds_std {} + +//@cond +#ifdef _DEBUG +# define cds_assert(X) assert(X) +#else +# include // snprintf + static inline void cds_assert_( bool bCond, char const * pszMsg, char const * pszFile, int nLine ) + { + if ( !bCond ) { + char buf[4096]; +# if CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL + _snprintf_s( buf, sizeof(buf)/sizeof(buf[0]), _TRUNCATE, pszMsg, pszFile, nLine ); +# else + snprintf( buf, sizeof(buf)/sizeof(buf[0]), pszMsg, pszFile, nLine ); +# endif + throw cds::Exception( buf ); + } + } +# define cds_assert(X) cds_assert_( X, "%s (%d): Assert failed: " #X, __FILE__, __LINE__ ); +#endif +//@endcond + +#endif // #ifndef __CDS_DEFS_H diff --git a/cds/details/functor_wrapper.h b/cds/details/functor_wrapper.h new file mode 100644 index 00000000..d4882a43 --- /dev/null +++ b/cds/details/functor_wrapper.h @@ -0,0 +1,152 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_FUNCTOR_WRAPPER_H +#define __CDS_DETAILS_FUNCTOR_WRAPPER_H + +#include + +//@cond +namespace cds { namespace details { + + template + struct functor_wrapper + { + public: + functor_wrapper() + {} + + functor_wrapper( Functor /*f*/) + {} + + Functor get() + { + return Functor(); + } + }; + + template + struct functor_wrapper + { + Functor& m_func; + public: + functor_wrapper( Functor& f) + : m_func(f) + {} + + Functor& get() + { + return m_func; + } + }; + + template + struct functor_wrapper< boost::reference_wrapper > + { + boost::reference_wrapper m_func; + public: + functor_wrapper( boost::reference_wrapper f) + : m_func(f) + {} + + Functor& get() + { + return m_func.get(); + } + }; + +#ifdef CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT + template + struct functor_wrapper + { + typedef Result (* func_ptr)(Args...); + typedef Result (& func_ref)(Args...); + func_ptr m_func; + public: + functor_wrapper( func_ptr f ) + : m_func(f) + {} + + func_ref get() + { + assert( m_func != NULL ); + return *m_func; + } + }; +#else + template + struct functor_wrapper + { + typedef Result (* func_ptr)(); + typedef Result (& func_ref)(); + func_ptr m_func; + public: + functor_wrapper( func_ptr f ) + : m_func(f) + {} + + func_ref get() + { + assert( m_func != NULL ); + return *m_func; + } + }; + + template + struct functor_wrapper + { + typedef Result (* func_ptr)(Arg1); + typedef Result (& func_ref)(Arg1); + func_ptr m_func; + public: + functor_wrapper( func_ptr f ) + : m_func(f) + {} + + func_ref get() + { + assert( m_func != NULL ); + return *m_func; + } + }; + + template + struct functor_wrapper + { + typedef Result (* func_ptr)(Arg1, Arg2); + typedef Result (& func_ref)(Arg1, Arg2); + func_ptr m_func; + public: + functor_wrapper( func_ptr f ) + : m_func(f) + {} + + func_ref get() + { + assert( m_func != NULL ); + return *m_func; + } + }; + + template + struct functor_wrapper + { + typedef Result (* func_ptr)(Arg1, Arg2, Arg3); + typedef Result (& func_ref)(Arg1, Arg2, Arg3); + func_ptr m_func; + public: + functor_wrapper( func_ptr f ) + : m_func(f) + {} + + func_ref get() + { + assert( m_func != NULL ); + return *m_func; + } + }; + +#endif +}} // namespace cds::details +//@endcond + +#endif // #ifndef __CDS_DETAILS_FUNCTOR_WRAPPER_H diff --git a/cds/details/hash_functor_selector.h b/cds/details/hash_functor_selector.h new file mode 100644 index 00000000..c7a49d4e --- /dev/null +++ b/cds/details/hash_functor_selector.h @@ -0,0 +1,51 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_HASH_FUNCTOR_SELECTOR_H +#define __CDS_DETAILS_HASH_FUNCTOR_SELECTOR_H + +//@cond + +#if CDS_COMPILER == CDS_COMPILER_MSVC || 
CDS_COMPILER == CDS_COMPILER_INTEL +# include +#elif CDS_COMPILER == CDS_COMPILER_GCC || CDS_COMPILER == CDS_COMPILER_CLANG + // GCC 4.3+ +# include +#else + // Default, use boost implementation +# include +#endif + +namespace cds { namespace details { + +#if CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL +# if _MSC_VER >= 1600 + // MSVC 2010 and above + using std::hash; +# define CDS_BEGIN_STD_HASH_NAMESPACE namespace std { +# define CDS_END_STD_HASH_NAMESPACE } +# define CDS_STD_HASH_NAMESPACE std +# else + // MSVC 2008 + using std::tr1::hash; +# define CDS_BEGIN_STD_HASH_NAMESPACE namespace std { namespace tr1 { +# define CDS_END_STD_HASH_NAMESPACE }} +# define CDS_STD_HASH_NAMESPACE std::tr1 +# endif +#elif CDS_COMPILER == CDS_COMPILER_GCC || CDS_COMPILER == CDS_COMPILER_CLANG + // GCC 4.3+ + using std::hash; +# define CDS_BEGIN_STD_HASH_NAMESPACE namespace std { +# define CDS_END_STD_HASH_NAMESPACE } +# define CDS_STD_HASH_NAMESPACE std +#else + // Default, use boost implementation + using std::tr1::hash; +# define CDS_BEGIN_STD_HASH_NAMESPACE namespace std { namespace tr1 { +# define CDS_END_STD_HASH_NAMESPACE }} +# define CDS_STD_HASH_NAMESPACE std::tr1 +#endif + +}} // namespace cds::details +//@endcond + +#endif // __CDS_DETAILS_HASH_FUNCTOR_SELECTOR_H diff --git a/cds/details/is_aligned.h b/cds/details/is_aligned.h new file mode 100644 index 00000000..f50b899f --- /dev/null +++ b/cds/details/is_aligned.h @@ -0,0 +1,36 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_IS_ALIGNED_H +#define __CDS_DETAILS_IS_ALIGNED_H + +#include + +namespace cds { namespace details { + + /// Checks if the pointer \p p has \p ALIGN byte alignment + /** + \p ALIGN must be power of 2. + + The function is mostly intended for run-time assertion + */ + template + static inline bool is_aligned(T const * p) + { + return (((uptr_atomic_t)p) & uptr_atomic_t(ALIGN - 1)) == 0; + } + + /// Checks if the pointer \p p has \p nAlign byte alignment + /** + \p nAlign must be power of 2. 
+ + The function is mostly intended for run-time assertion + */ + template + static inline bool is_aligned(T const * p, size_t nAlign) + { + return (((uptr_atomic_t)p) & uptr_atomic_t(nAlign - 1)) == 0; + } + +}} // namespace cds::details + +#endif // #ifndef __CDS_DETAILS_IS_ALIGNED_H diff --git a/cds/details/lib.h b/cds/details/lib.h new file mode 100644 index 00000000..ecfb01b2 --- /dev/null +++ b/cds/details/lib.h @@ -0,0 +1,28 @@ +//$$CDS-header$$ + +#ifndef __CDS_LIB_H +#define __CDS_LIB_H +//@cond + +#include + +#ifndef CDS_BUILD_LIB + +#ifdef _DEBUG +# define CDS_LIB_DEBUG_SUFFIX "_debug" +#else +# define CDS_LIB_DEBUG_SUFFIX "" +#endif + +#if CDS_COMPILER == CDS_COMPILER_MSVC +# pragma comment( lib, "libcds-" CDS_PROCESSOR__NICK "-" CDS_COMPILER__NICK CDS_LIB_DEBUG_SUFFIX ) +#elif CDS_COMPILER == CDS_COMPILER_INTEL +# pragma comment( lib, "libcds-" CDS_PROCESSOR__NICK "-" CDS_COMPILER__NICK CDS_LIB_DEBUG_SUFFIX ) +#endif + +#undef CDS_LIB_DEBUG_SUFFIX + +#endif // #ifndef CDS_BUILD_LIB + +//@endcond +#endif // #ifndef __CDS_LIB_H diff --git a/cds/details/make_const_type.h b/cds/details/make_const_type.h new file mode 100644 index 00000000..4ece20e3 --- /dev/null +++ b/cds/details/make_const_type.h @@ -0,0 +1,30 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_MAKE_CONST_TYPE_H +#define __CDS_DETAILS_MAKE_CONST_TYPE_H + +#include + +namespace cds { namespace details { + + //@cond + template + struct make_const_type + { + typedef T type; + typedef T * pointer; + typedef T & reference; + }; + template + struct make_const_type + { + typedef T const type; + typedef T const * pointer; + typedef T const & reference; + }; + + //@endcond + +}} // namespace cds::details + +#endif // #ifndef __CDS_DETAILS_MAKE_CONST_TYPE_H diff --git a/cds/details/marked_ptr.h b/cds/details/marked_ptr.h new file mode 100644 index 00000000..3645e052 --- /dev/null +++ b/cds/details/marked_ptr.h @@ -0,0 +1,374 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_MARKED_PTR_H +#define __CDS_DETAILS_MARKED_PTR_H + +#include + +namespace cds { + namespace details { + + /// Marked pointer + /** + On the modern architectures, the default data alignment is 4 (for 32bit) or 8 byte for 64bit. + Therefore, the least 2 or 3 bits of the pointer is always zero and can + be used as a bitfield to store logical flags. This trick is widely used in + lock-free programming to operate with the pointer and its flags atomically. + + Template parameters: + - T - type of pointer + - Bitmask - bitmask of least unused bits + */ + template + class marked_ptr + { + T * m_ptr ; ///< pointer and its mark bits + + public: + typedef T value_type ; ///< type of value the class points to + typedef T * pointer_type ; ///< type of pointer + static CDS_CONSTEXPR_CONST uintptr_t bitmask = Bitmask ; ///< bitfield bitmask + static CDS_CONSTEXPR_CONST uintptr_t pointer_bitmask = ~bitmask ; ///< pointer bitmask + + public: + /// Constructs null marked pointer. The flag is cleared. + CDS_CONSTEXPR marked_ptr() CDS_NOEXCEPT + : m_ptr( null_ptr() ) + {} + + /// Constructs marked pointer with \p ptr value. The least bit(s) of \p ptr is the flag. + CDS_CONSTEXPR explicit marked_ptr( value_type * ptr ) CDS_NOEXCEPT + : m_ptr( ptr ) + {} + + /// Constructs marked pointer with \p ptr value and \p nMask flag. 
+ /** + The \p nMask argument defines the OR-bits + */ + marked_ptr( value_type * ptr, int nMask ) CDS_NOEXCEPT + : m_ptr( ptr ) + { + assert( bits() == 0 ); + *this |= nMask; + } + +# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT + /// Copy constructor + marked_ptr( marked_ptr const& src ) CDS_NOEXCEPT_DEFAULTED = default; + /// Copy-assignment operator + marked_ptr& operator =( marked_ptr const& p ) CDS_NOEXCEPT_DEFAULTED = default; +# if defined(CDS_MOVE_SEMANTICS_SUPPORT) && !defined(CDS_DISABLE_DEFAULT_MOVE_CTOR) + //@cond + marked_ptr( marked_ptr&& src ) CDS_NOEXCEPT_DEFAULTED = default; + marked_ptr& operator =( marked_ptr&& p ) CDS_NOEXCEPT_DEFAULTED = default; + //@endcond +# endif +# else + /// Copy constructor + marked_ptr( marked_ptr const& src ) CDS_NOEXCEPT + : m_ptr( src.m_ptr ) + {} + + /// Copy-assignment operator + marked_ptr& operator =( marked_ptr const& p ) CDS_NOEXCEPT + { + m_ptr = p.m_ptr; + return *this; + } +# endif + + //TODO: make move ctor + + private: + //@cond + static uintptr_t to_int( value_type * p ) CDS_NOEXCEPT + { + return reinterpret_cast( p ); + } + + static value_type * to_ptr( uintptr_t n ) CDS_NOEXCEPT + { + return reinterpret_cast< value_type *>( n ); + } + + uintptr_t to_int() const CDS_NOEXCEPT + { + return to_int( m_ptr ); + } + //@endcond + + public: + /// Returns the pointer without mark bits (real pointer) const version + value_type * ptr() const CDS_NOEXCEPT + { + return to_ptr( to_int() & ~bitmask ); + } + + /// Returns the pointer and bits together + value_type * all() const CDS_NOEXCEPT + { + return m_ptr; + } + + /// Returns the least bits of pointer according to \p Bitmask template argument of the class + uintptr_t bits() const CDS_NOEXCEPT + { + return to_int() & bitmask; + } + + /// Analogue for \ref ptr + value_type * operator ->() const CDS_NOEXCEPT + { + return ptr(); + } + + /// Assignment operator sets markup bits to zero + marked_ptr operator =( T * p ) CDS_NOEXCEPT + { + m_ptr = p; + return *this; + } + + /// Set LSB bits as bits() | nBits + marked_ptr& operator |=( int nBits ) CDS_NOEXCEPT + { + assert( (nBits & pointer_bitmask) == 0 ); + m_ptr = to_ptr( to_int() | nBits ); + return *this; + } + + /// Set LSB bits as bits() & nBits + marked_ptr& operator &=( int nBits ) CDS_NOEXCEPT + { + assert( (nBits & pointer_bitmask) == 0 ); + m_ptr = to_ptr( to_int() & (pointer_bitmask | nBits) ); + return *this; + } + + /// Set LSB bits as bits() ^ nBits + marked_ptr& operator ^=( int nBits ) CDS_NOEXCEPT + { + assert( (nBits & pointer_bitmask) == 0 ); + m_ptr = to_ptr( to_int() ^ nBits ); + return *this; + } + + /// Returns p |= nBits + friend marked_ptr operator |( marked_ptr p, int nBits) CDS_NOEXCEPT + { + p |= nBits; + return p; + } + + /// Returns p |= nBits + friend marked_ptr operator |( int nBits, marked_ptr p ) CDS_NOEXCEPT + { + p |= nBits; + return p; + } + + /// Returns p &= nBits + friend marked_ptr operator &( marked_ptr p, int nBits) CDS_NOEXCEPT + { + p &= nBits; + return p; + } + + /// Returns p &= nBits + friend marked_ptr operator &( int nBits, marked_ptr p ) CDS_NOEXCEPT + { + p &= nBits; + return p; + } + + /// Returns p ^= nBits + friend marked_ptr operator ^( marked_ptr p, int nBits) CDS_NOEXCEPT + { + p ^= nBits; + return p; + } + /// Returns p ^= nBits + friend marked_ptr operator ^( int nBits, marked_ptr p ) CDS_NOEXCEPT + { + p ^= nBits; + return p; + } + + /// Inverts LSBs of pointer \p p + friend marked_ptr operator ~( marked_ptr p ) CDS_NOEXCEPT + { + return p ^ marked_ptr::bitmask; + } + + + /// 
Comparing two marked pointer including its mark bits + friend bool operator ==( marked_ptr p1, marked_ptr p2 ) CDS_NOEXCEPT + { + return p1.all() == p2.all(); + } + + /// Comparing marked pointer and raw pointer, mark bits of \p p1 is ignored + friend bool operator ==( marked_ptr p1, value_type const * p2 ) CDS_NOEXCEPT + { + return p1.ptr() == p2; + } + + /// Comparing marked pointer and raw pointer, mark bits of \p p2 is ignored + friend bool operator ==( value_type const * p1, marked_ptr p2 ) CDS_NOEXCEPT + { + return p1 == p2.ptr(); + } + + /// Comparing two marked pointer including its mark bits + friend bool operator !=( marked_ptr p1, marked_ptr p2 ) CDS_NOEXCEPT + { + return p1.all() != p2.all(); + } + + /// Comparing marked pointer and raw pointer, mark bits of \p p1 is ignored + friend bool operator !=( marked_ptr p1, value_type const * p2 ) CDS_NOEXCEPT + { + return p1.ptr() != p2; + } + + /// Comparing marked pointer and raw pointer, mark bits of \p p2 is ignored + friend bool operator !=( value_type const * p1, marked_ptr p2 ) CDS_NOEXCEPT + { + return p1 != p2.ptr(); + } + + //@cond + /// atomic< marked_ptr< T, Bitmask > > support + T *& impl_ref() CDS_NOEXCEPT + { + return m_ptr; + } + //@endcond + }; + } // namespace details + +} // namespace cds + +//@cond +CDS_CXX11_ATOMIC_BEGIN_NAMESPACE + + template + class atomic< cds::details::marked_ptr > + { + private: + typedef cds::details::marked_ptr marked_ptr; + typedef CDS_ATOMIC::atomic atomic_impl; + + atomic_impl m_atomic; + public: + bool is_lock_free() const volatile CDS_NOEXCEPT + { + return m_atomic.is_lock_free(); + } + bool is_lock_free() const CDS_NOEXCEPT + { + return m_atomic.is_lock_free(); + } + + void store(marked_ptr val, memory_order order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + m_atomic.store( val.all(), order ); + } + void store(marked_ptr val, memory_order order = memory_order_seq_cst) CDS_NOEXCEPT + { + m_atomic.store( val.all(), order ); + } + + marked_ptr load(memory_order order = memory_order_seq_cst) const volatile CDS_NOEXCEPT + { + return marked_ptr( m_atomic.load( order )); + } + marked_ptr load(memory_order order = memory_order_seq_cst) const CDS_NOEXCEPT + { + return marked_ptr( m_atomic.load( order )); + } + + operator marked_ptr() const volatile CDS_NOEXCEPT + { + return load(); + } + operator marked_ptr() const CDS_NOEXCEPT + { + return load(); + } + + marked_ptr exchange(marked_ptr val, memory_order order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return marked_ptr( m_atomic.exchange( val.all(), order )); + } + marked_ptr exchange(marked_ptr val, memory_order order = memory_order_seq_cst) CDS_NOEXCEPT + { + return marked_ptr( m_atomic.exchange( val.all(), order )); + } + + bool compare_exchange_weak(marked_ptr& expected, marked_ptr desired, memory_order success_order, memory_order failure_order) volatile CDS_NOEXCEPT + { + return m_atomic.compare_exchange_weak( expected.impl_ref(), desired.all(), success_order, failure_order ); + } + bool compare_exchange_weak(marked_ptr& expected, marked_ptr desired, memory_order success_order, memory_order failure_order) CDS_NOEXCEPT + { + return m_atomic.compare_exchange_weak( expected.impl_ref(), desired.all(), success_order, failure_order ); + } + bool compare_exchange_strong(marked_ptr& expected, marked_ptr desired, memory_order success_order, memory_order failure_order) volatile CDS_NOEXCEPT + { + return m_atomic.compare_exchange_strong( expected.impl_ref(), desired.all(), success_order, failure_order ); + } + bool 
compare_exchange_strong(marked_ptr& expected, marked_ptr desired, memory_order success_order, memory_order failure_order) CDS_NOEXCEPT + { + return m_atomic.compare_exchange_strong( expected.impl_ref(), desired.all(), success_order, failure_order ); + } + bool compare_exchange_weak(marked_ptr& expected, marked_ptr desired, memory_order success_order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return m_atomic.compare_exchange_weak( expected.impl_ref(), desired.all(), success_order ); + } + bool compare_exchange_weak(marked_ptr& expected, marked_ptr desired, memory_order success_order = memory_order_seq_cst) CDS_NOEXCEPT + { + return m_atomic.compare_exchange_weak( expected.impl_ref(), desired.all(), success_order ); + } + bool compare_exchange_strong(marked_ptr& expected, marked_ptr desired, memory_order success_order = memory_order_seq_cst) volatile CDS_NOEXCEPT + { + return m_atomic.compare_exchange_strong( expected.impl_ref(), desired.all(), success_order ); + } + bool compare_exchange_strong(marked_ptr& expected, marked_ptr desired, memory_order success_order = memory_order_seq_cst) CDS_NOEXCEPT + { + return m_atomic.compare_exchange_strong( expected.impl_ref(), desired.all(), success_order ); + } + + CDS_CONSTEXPR atomic() CDS_NOEXCEPT + : m_atomic( cds::null_ptr() ) + {} + + CDS_CONSTEXPR explicit atomic(marked_ptr val) CDS_NOEXCEPT + : m_atomic( val.all() ) + {} + CDS_CONSTEXPR explicit atomic(T * p) CDS_NOEXCEPT + : m_atomic( p ) + {} + +# ifdef CDS_CXX11_DELETE_DEFINITION_SUPPORT + atomic(const atomic&) = delete; + atomic& operator=(const atomic&) = delete; + atomic& operator=(const atomic&) volatile = delete; +# endif + + marked_ptr operator=(marked_ptr val) volatile CDS_NOEXCEPT + { + store( val ); + return val; + } + marked_ptr operator=(marked_ptr val) CDS_NOEXCEPT + { + store( val ); + return val; + } + }; + +CDS_CXX11_ATOMIC_END_NAMESPACE +//@endcond + +#endif // #ifndef __CDS_DETAILS_MARKED_PTR_H diff --git a/cds/details/noncopyable.h b/cds/details/noncopyable.h new file mode 100644 index 00000000..c2983a9a --- /dev/null +++ b/cds/details/noncopyable.h @@ -0,0 +1,15 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_NONCOPYABLE_H +#define __CDS_DETAILS_NONCOPYABLE_H + +#include + +//@cond +namespace cds { namespace details { + using boost::noncopyable; +}} // namespace cds::details +//@endcond + +#endif // __CDS_DETAILS_NONCOPYABLE_H + diff --git a/cds/details/static_functor.h b/cds/details/static_functor.h new file mode 100644 index 00000000..bba9f861 --- /dev/null +++ b/cds/details/static_functor.h @@ -0,0 +1,21 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_STATIC_FUNCTOR_H +#define __CDS_DETAILS_STATIC_FUNCTOR_H + +//@cond +namespace cds { namespace details { + + template + struct static_functor + { + static void call( T * p ) + { + Functor()( p ); + } + }; + +}} // namespace cds::details +//@endcond + +#endif // #ifndef __CDS_DETAILS_STATIC_FUNCTOR_H diff --git a/cds/details/std/chrono.h b/cds/details/std/chrono.h new file mode 100644 index 00000000..3c045aad --- /dev/null +++ b/cds/details/std/chrono.h @@ -0,0 +1,24 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_STD_CHRONO_H +#define __CDS_DETAILS_STD_CHRONO_H + +//@cond + +#include + +#ifdef CDS_CXX11_STDLIB_CHRONO +# include +namespace cds_std { + namespace chrono = std::chrono; +} +#else +# include +namespace cds_std { + namespace chrono = boost::chrono; +} +#endif + +//@endcond + +#endif // #ifndef __CDS_DETAILS_STD_CHRONO_H diff --git a/cds/details/std/condition_variable.h b/cds/details/std/condition_variable.h 
new file mode 100644 index 00000000..5db1e2fa --- /dev/null +++ b/cds/details/std/condition_variable.h @@ -0,0 +1,26 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_STD_CONDITION_VARIABLE_H +#define __CDS_DETAILS_STD_CONDITION_VARIABLE_H + +//@cond + +#include + +#ifdef CDS_CXX11_STDLIB_CONDITION_VARIABLE +# include + namespace cds_std { + using std::condition_variable; + using std::condition_variable_any; + } +#else +# include + namespace cds_std { + using boost::condition_variable; + using boost::condition_variable_any; + } +#endif + +//@endcond + +#endif // #ifndef __CDS_DETAILS_STD_CONDITION_VARIABLE_H diff --git a/cds/details/std/memory.h b/cds/details/std/memory.h new file mode 100644 index 00000000..8c0682ef --- /dev/null +++ b/cds/details/std/memory.h @@ -0,0 +1,108 @@ +//$$CDS-header$$ + +//@cond +#ifndef __CDS_DETAILS_STD_MEMORY_H +#define __CDS_DETAILS_STD_MEMORY_H + +#include +#include + +// ----------------------------------------------------------------- +// std::unique_ptr + +#if ((CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL) && _MSC_VER == 1500) \ + || (CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION < 40400 ) +// MS VC 2008, GCC 4.3 + +namespace std { + template struct default_delete { + default_delete() + {} + void operator()(T* p) const + { + delete p; + } + }; + + template > + class unique_ptr: private auto_ptr + { + typedef auto_ptr base_class; + + // copy ctor is deleted + template unique_ptr( unique_ptr const& s ); + + public: + unique_ptr() throw() + {} + + explicit unique_ptr( T * p ) throw() + : base_class( p ) + {} + + ~unique_ptr() + { + T * p = release(); + if ( p ) + Deleter()( p ); + } + + T * operator ->() const throw() + { + return base_class::operator->(); + } + + T& operator *() throw() + { + return base_class::operator*(); + } + + unique_ptr& operator=( T * p ) + { + base_class::operator=(p); + return *this; + } + + T * get() const throw() + { + return base_class::get(); + } + + T * release() throw() + { + return base_class::release(); + } + + void reset( T * p ) + { + T * pOld = release(); + assert( p != pOld ); + if ( pOld ) + Deleter()( pOld ); + base_class::reset( p ); + } + }; +} + +#endif +// ----------------------------------------------------------------- +// std::shared_ptr + +#if (CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL) && _MSC_VER == 1500 +// MS VC 2008 + +#include +#include +#include + +namespace std { + using boost::shared_ptr; + using boost::make_shared; + using boost::allocate_shared; + using boost::enable_shared_from_this; +} // namespace std +#endif + + +#endif // #ifndef __CDS_DETAILS_STD_MEMORY_H +//@endcond diff --git a/cds/details/std/mutex.h b/cds/details/std/mutex.h new file mode 100644 index 00000000..7bd3533c --- /dev/null +++ b/cds/details/std/mutex.h @@ -0,0 +1,39 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_STD_MUTEX_H +#define __CDS_DETAILS_STD_MUTEX_H + +//@cond + +#include + +#ifdef CDS_CXX11_STDLIB_MUTEX +# include + namespace cds_std { + using std::mutex; + using std::recursive_mutex; + using std::unique_lock; + using std::lock_guard; + using std::adopt_lock_t; + } +#else +# include +# include +# if BOOST_VERSION >= 105300 +# include +# include +# else +# include +# endif + namespace cds_std { + using boost::mutex; + using boost::recursive_mutex; + using boost::unique_lock; + using boost::lock_guard; + using boost::adopt_lock_t; + } +#endif + +//@endcond + +#endif // #ifndef __CDS_DETAILS_STD_MUTEX_H diff --git a/cds/details/std/thread.h 
b/cds/details/std/thread.h new file mode 100644 index 00000000..ff7af37a --- /dev/null +++ b/cds/details/std/thread.h @@ -0,0 +1,26 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_STD_THREAD_H +#define __CDS_DETAILS_STD_THREAD_H + +//@cond + +#include + +#ifdef CDS_CXX11_STDLIB_THREAD +# include + namespace cds_std { + using std::thread; + namespace this_thread = std::this_thread; + } +#else +# include + namespace cds_std { + using boost::thread; + namespace this_thread = boost::this_thread; + } +#endif + +//@endcond + +#endif // #ifndef __CDS_DETAILS_STD_THREAD_H diff --git a/cds/details/std/tuple.h b/cds/details/std/tuple.h new file mode 100644 index 00000000..97b935f7 --- /dev/null +++ b/cds/details/std/tuple.h @@ -0,0 +1,30 @@ +//$$CDS-header$$ + +//@cond +#ifndef __CDS_DETAILS_STD_TUPLE_H +#define __CDS_DETAILS_STD_TUPLE_H + +#include + +#if (CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL) && _MSC_VER < 1600 +// MS VC 2008 +#include + +namespace std { + using boost::tuple; + using boost::get; + using boost::make_tuple; + using boost::tie; + + template + struct tuple_size { + static size_t const value = boost::tuples::length::value; + }; + +} // namespace std +#else +# include +#endif + +#endif // #ifndef __CDS_DETAILS_STD_MEMORY_H +//@endcond diff --git a/cds/details/std/type_traits.h b/cds/details/std/type_traits.h new file mode 100644 index 00000000..2fca3ab5 --- /dev/null +++ b/cds/details/std/type_traits.h @@ -0,0 +1,89 @@ +//$$CDS-header$$ + +//@cond +#ifndef __CDS_DETAILS_STD_TYPE_TRAITS_H +#define __CDS_DETAILS_STD_TYPE_TRAITS_H + +#include +#include + +// ----------------------------------------------------------------- +// std::integral_constant, std::true_type, std::false_type + +#if (CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL) && _MSC_VER == 1500 + +namespace std { + using std::tr1::integral_constant; + using std::tr1::true_type; + using std::tr1::false_type; +} // namespace std +#endif + + +// ----------------------------------------------------------------- +// std::is_same, std::is_base_of + +#if (CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL) && _MSC_VER == 1500 + +namespace std { + using std::tr1::is_same; + using std::tr1::is_base_of; +} // namespace std +#endif + + +// ----------------------------------------------------------------- +// std::conditional + +#if (CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL) && _MSC_VER == 1500 + +#include +namespace std { + using boost::conditional; +} // namespace std +#endif + +// ----------------------------------------------------------------- +// std::decay + +#if (CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL) && _MSC_VER == 1500 + +#include +namespace std { + using boost::decay; +} // namespace std +#endif + +// ----------------------------------------------------------------- +// std::enable_if + +#if (CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL) && _MSC_VER == 1500 + +namespace std { + + template + struct enable_if + {}; + + template + struct enable_if + { + typedef Type type; + }; + +} // namespace std +#endif + +// ----------------------------------------------------------------- +// std::remove_const, std::remove_cv, std::remove_volatile +#if (CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL) && _MSC_VER == 1500 +namespace std { + using std::tr1::remove_const; + using std::tr1::remove_volatile; + using std::tr1::remove_cv; + using 
std::tr1::remove_reference; +} +#endif + +#endif // #ifndef __CDS_DETAILS_STD_TYPE_TRAITS_H +//@endcond diff --git a/cds/details/trivial_assign.h b/cds/details/trivial_assign.h new file mode 100644 index 00000000..6e5d4c2a --- /dev/null +++ b/cds/details/trivial_assign.h @@ -0,0 +1,22 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_TRIVIAL_ASSIGN_H +#define __CDS_DETAILS_TRIVIAL_ASSIGN_H + +#include + +//@cond +namespace cds { namespace details { + + template + struct trivial_assign + { + Dest& operator()( Dest& dest, const Source& src ) + { + return dest = src; + } + }; +}} // namespace cds::details +//@endcond + +#endif // #ifndef __CDS_DETAILS_TRIVIAL_ASSIGN_H diff --git a/cds/details/type_padding.h b/cds/details/type_padding.h new file mode 100644 index 00000000..a1bf5b01 --- /dev/null +++ b/cds/details/type_padding.h @@ -0,0 +1,56 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_TYPE_PADDING_H +#define __CDS_DETAILS_TYPE_PADDING_H + +namespace cds { namespace details { + + //@cond none + template + struct type_padding_helper: public T + { + enum { + value = Modulo + }; + char _[Align - Modulo] ; // padding + + type_padding_helper() CDS_NOEXCEPT_( noexcept( T() )) + {} + }; + template + struct type_padding_helper: public T + { + enum { + value = 0 + }; + + type_padding_helper() CDS_NOEXCEPT_( noexcept( T()) ) + {} + }; + //@endcond + + /// Automatic alignment type \p T to \p AlignFactor + /** + The class adds appropriate bytes to type T that the following condition is true: + \code + sizeof( type_padding::type ) % AlignFactor == 0 + \endcode + It is guaranteed that count of padding bytes no more than AlignFactor - 1. + + \b Applicability: type \p T must not have constructors another that default ctor. + For example, \p T may be any POD type. + */ + template + class type_padding { + public: + /// Result type + typedef type_padding_helper type; + + /// Padding constant + enum { + value = type::value + }; + }; + +}} // namespace cds::details +#endif // #ifndef __CDS_DETAILS_TYPE_PADDING_H diff --git a/cds/details/void_selector.h b/cds/details/void_selector.h new file mode 100644 index 00000000..198cfb11 --- /dev/null +++ b/cds/details/void_selector.h @@ -0,0 +1,27 @@ +//$$CDS-header$$ + +#ifndef __CDS_DETAILS_VOID_SELECTOR_H +#define __CDS_DETAILS_VOID_SELECTOR_H + +#include + +namespace cds { + namespace details { + + /// Void type selector + /** + This metafunction is equal to the following expression: + \code + std::conditional< std::is_same< T, void >::value, Void, NoVoid >::type + \endcode + + The \p NoVoid is optional, default is \p NoVoid == T + */ + template + struct void_selector: public std::conditional< std::is_same< T, void >::value, Void, NoVoid > + {}; + + } // namespace details +} // namespace cds + +#endif // #ifndef __CDS_DETAILS_VOID_SELECTOR_H diff --git a/cds/gc/all.h b/cds/gc/all.h new file mode 100644 index 00000000..f5a79732 --- /dev/null +++ b/cds/gc/all.h @@ -0,0 +1,10 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_ALL_H +#define __CDS_GC_ALL_H + +#include +#include +#include + +#endif // #ifndef __CDS_GC_ALL_H diff --git a/cds/gc/default_gc.h b/cds/gc/default_gc.h new file mode 100644 index 00000000..0fb888a9 --- /dev/null +++ b/cds/gc/default_gc.h @@ -0,0 +1,16 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_DEFAULT_GC_H +#define __CDS_GC_DEFAULT_GC_H + +#include + +namespace cds { namespace gc { + + /// Default garbage collector + typedef HP default_gc; + +}} // namespace cds::gc + + +#endif // #ifndef __CDS_GC_DEFAULT_GC_H diff --git a/cds/gc/details/retired_ptr.h 
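The type_padding metafunction above pads a type so that its size becomes a multiple of AlignFactor. A small sketch of the intended use; the counter struct and the 64-byte factor are hypothetical, and the guarantee quoted in the documentation is restated as a comment:

    #include <cds/details/type_padding.h>

    struct counter {
        long m_nValue;      // plain POD payload
    };

    // Pad counter so that its size is a multiple of 64 (e.g. a cache-line size).
    // Per the documentation above, at most 63 padding bytes are added and
    //      sizeof( padded_counter ) % 64 == 0
    typedef cds::details::type_padding< counter, 64 >::type padded_counter;
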
b/cds/gc/details/retired_ptr.h new file mode 100644 index 00000000..92d84d30 --- /dev/null +++ b/cds/gc/details/retired_ptr.h @@ -0,0 +1,92 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_DETAILS_RETIRED_PTR_H +#define __CDS_GC_DETAILS_RETIRED_PTR_H + +#include + +namespace cds { namespace gc { + /// Common implementation details for any GC + namespace details { + + /// Pointer to function to free (destruct and deallocate) retired pointer of specific type + typedef void (* free_retired_ptr_func )( void * ); + + /// Retired pointer + /** + Pointer to an object that is ready to delete. + */ + struct retired_ptr { + /// Pointer type + typedef void * pointer; + + pointer m_p ; ///< retired pointer + free_retired_ptr_func m_funcFree ; ///< pointer to the destructor function + + /// Comparison of two retired pointers + static bool less( const retired_ptr& p1, const retired_ptr& p2 ) + { + return p1.m_p < p2.m_p; + } + + /// Default ctor initializes pointer to NULL + retired_ptr() + : m_p( NULL ) + , m_funcFree( NULL ) + {} + + /// Ctor + retired_ptr( pointer p, free_retired_ptr_func func ) + : m_p( p ), + m_funcFree( func ) + {} + + /// Typecasting ctor + template + retired_ptr( T * p, void (* pFreeFunc)(T *)) + : m_p( reinterpret_cast( p ) ) + , m_funcFree( reinterpret_cast< free_retired_ptr_func >( pFreeFunc )) + {} + + /// Assignment operator + retired_ptr& operator =( const retired_ptr& s) + { + m_p = s.m_p; + m_funcFree = s.m_funcFree; + return *this; + } + + /// Invokes destructor function for the pointer + void free() + { + assert( m_funcFree != NULL ); + assert( m_p != NULL ); + m_funcFree( m_p ); + + CDS_STRICT_DO( m_p = null_ptr() ); + CDS_STRICT_DO( m_funcFree = null_ptr()); + } + }; + + //@cond + static inline bool operator <( const retired_ptr& p1, const retired_ptr& p2 ) + { + return retired_ptr::less( p1, p2 ); + } + + static inline bool operator ==( const retired_ptr& p1, const retired_ptr& p2 ) + { + return p1.m_p == p2.m_p; + } + + static inline bool operator !=( const retired_ptr& p1, const retired_ptr& p2 ) + { + return !(p1 == p2); + } + //@endcond + + + } // namespace details +}} // namespace cds::gc + +#endif // #ifndef __CDS_GC_DETAILS_RETIRED_PTR_H diff --git a/cds/gc/exception.h b/cds/gc/exception.h new file mode 100644 index 00000000..797bbb6b --- /dev/null +++ b/cds/gc/exception.h @@ -0,0 +1,15 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_EXCEPTION_H +#define __CDS_GC_EXCEPTION_H + +#include + +namespace cds { namespace gc { + + /// %Exception "Too few hazard pointers" + CDS_DECLARE_EXCEPTION( too_few_hazard_pointers, "Too few hazard pointers" ); + +}} // namespace cds::gc + +#endif // #ifndef __CDS_GC_EXCEPTION_H diff --git a/cds/gc/gc_fwd.h b/cds/gc/gc_fwd.h new file mode 100644 index 00000000..7a6d5b1a --- /dev/null +++ b/cds/gc/gc_fwd.h @@ -0,0 +1,19 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_FORWARD_H +#define __CDS_GC_FORWARD_H + +#include + +//@cond +namespace cds { namespace gc { + class HP; + class HRC; + class PTB; + + class nogc; +}} // namespace cds::gc + +//@endcond + +#endif // #ifndef __CDS_GC_FORWARD_H diff --git a/cds/gc/guarded_ptr.h b/cds/gc/guarded_ptr.h new file mode 100644 index 00000000..be93c565 --- /dev/null +++ b/cds/gc/guarded_ptr.h @@ -0,0 +1,218 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_GUARDED_PTR_H +#define __CDS_GC_GUARDED_PTR_H + +#include + +namespace cds { namespace gc { + + /// Guarded pointer + /** + A guarded pointer is a pair of the pointer and GC's guard. + Usually, it is used for returning a pointer to the item from an lock-free container. 
+ The guard prevents the pointer to be early disposed (freed) by GC. + After destructing \p %guarded_ptr object the pointer can be automatically disposed (freed) at any time. + + Template arguments: + - \p GC - a garbage collector type like cds::gc::HP and any other from cds::gc namespace + - \p GuardedType - a type which the guard stores + - \p ValueType - a value type + - \p Cast - a functor for converting GuardedType* to ValueType*. Default is \p void (no casting). + + For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed. + In such case the \p %guarded_ptr is: + @code + typedef cds::gc::guarded_ptr< cds::gc::HP, foo > intrusive_guarded_ptr; + @endcode + + For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed. + For example: + @code + struct foo { + int const key; + std::string value; + }; + + struct value_accessor { + std::string* operator()( foo* pFoo ) const + { + return &(pFoo->value); + } + }; + + // Guarded ptr + typedef cds::gc::guarded_ptr< cds::gc::HP, Foo, std::string, value_accessor > nonintrusive_guarded_ptr; + @endcode + + Many set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor. + */ + template + class guarded_ptr + { + //TODO: use moce semantics and explicit operator bool! + public: + typedef GC gc ; ///< Garbage collector like cds::gc::HP and any other from cds::gc namespace + typedef GuardedType guarded_type; ///< Guarded type + typedef ValueType value_type ; ///< Value type + typedef Cast value_cast ; ///< Functor for casting \p guarded_type to \p value_type + + private: + //@cond + typename gc::Guard m_guard; + //@endcond + + public: + /// Creates empty guarded pointer + guarded_ptr() CDS_NOEXCEPT + {} + + /// Initializes guarded pointer with \p p + guarded_ptr( guarded_type * p ) CDS_NOEXCEPT + { + m_guard.assign( p ); + } + + /// Copy constructor + guarded_ptr( guarded_ptr const& gp ) CDS_NOEXCEPT + { + m_guard.copy( gp.m_guard ); + } + + /// Clears the guarded pointer + /** + \ref release is called if guarded pointer is not \ref empty + */ + ~guarded_ptr() CDS_NOEXCEPT + { + release(); + } + + /// Assignment operator + guarded_ptr& operator=( guarded_ptr const& gp ) CDS_NOEXCEPT + { + m_guard.copy( gp.m_guard ); + return *this; + } + + /// Returns a pointer to guarded value + value_type * operator ->() const CDS_NOEXCEPT + { + return value_cast()( m_guard.template get() ); + } + + /// Returns a reference to guarded value + value_type& operator *() CDS_NOEXCEPT + { + assert( !empty()); + return *value_cast()( m_guard.template get() ); + } + + /// Returns const reference to guarded value + value_type const& operator *() const CDS_NOEXCEPT + { + assert( !empty()); + return *value_cast()( m_guard.template get() ); + } + + /// Checks if the guarded pointer is \p NULL + bool empty() const CDS_NOEXCEPT + { + return m_guard.template get() == null_ptr(); + } + + /// Clears guarded pointer + /** + If the guarded pointer has been released, the pointer can be disposed (freed) at any time. + Dereferncing the guarded pointer after \p release() is dangerous. + */ + void release() CDS_NOEXCEPT + { + m_guard.clear(); + } + + //@cond + // For internal use only!!! 
+ typename gc::Guard& guard() CDS_NOEXCEPT + { + return m_guard; + } + //@endcond + }; + + + //@cond + // Intrusive specialization + template + class guarded_ptr< GC, T, T, void > + { + public: + typedef GC gc ; ///< Garbage collector like cds::gc::HP + typedef T guarded_type; ///< Guarded type + typedef T value_type ; ///< Value type + + private: + typename gc::Guard m_guard; + + public: + guarded_ptr() CDS_NOEXCEPT + {} + + guarded_ptr( value_type * p ) CDS_NOEXCEPT + { + m_guard.assign( p ); + } + + guarded_ptr( guarded_ptr const& gp ) CDS_NOEXCEPT + { + m_guard.copy( gp.m_guard ); + } + + ~guarded_ptr() CDS_NOEXCEPT + { + release(); + } + + guarded_ptr& operator=( guarded_ptr const& gp ) CDS_NOEXCEPT + { + m_guard.copy( gp.m_guard ); + return *this; + } + + value_type * operator ->() const CDS_NOEXCEPT + { + return m_guard.template get(); + } + + value_type& operator *() CDS_NOEXCEPT + { + assert( !empty()); + return *m_guard.template get(); + } + + value_type const& operator *() const CDS_NOEXCEPT + { + assert( !empty()); + return *m_guard.template get(); + } + + bool empty() const CDS_NOEXCEPT + { + return m_guard.template get() == null_ptr(); + } + + void release() CDS_NOEXCEPT + { + m_guard.clear(); + } + + typename gc::Guard& guard() CDS_NOEXCEPT + { + return m_guard; + } + }; + //@endcond + +}} // namespace cds::gc + +#endif // #ifndef __CDS_GC_GUARDED_PTR_H diff --git a/cds/gc/hp.h b/cds/gc/hp.h new file mode 100644 index 00000000..2c87f006 --- /dev/null +++ b/cds/gc/hp.h @@ -0,0 +1,10 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_HP_H +#define __CDS_GC_HP_H + +#include +#include +#include + +#endif // #ifndef __CDS_GC_HP_H diff --git a/cds/gc/hp_decl.h b/cds/gc/hp_decl.h new file mode 100644 index 00000000..4b6d0edc --- /dev/null +++ b/cds/gc/hp_decl.h @@ -0,0 +1,567 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_HP_DECL_H +#define __CDS_GC_HP_DECL_H + +#include +#include + +namespace cds { namespace gc { + /// @defgroup cds_garbage_collector Garbage collectors + + /// Hazard Pointer garbage collector + /** @ingroup cds_garbage_collector + @headerfile cds/gc/hp.h + + This class realizes a wrapper for Hazard Pointer garbage collector internal implementation. + + Sources: + - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-freeobjects using atomic reads and writes" + - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects" + - [2004] Andrei Alexandrescy, Maged Michael "Lock-free Data Structures with Hazard Pointers" + + See \ref cds_how_to_use "How to use" section for details of garbage collector applying. 
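For orientation, a minimal initialization sketch in the spirit of the usage examples given elsewhere in this patch; the <cds/init.h> header and cds::Initialize/cds::Terminate are assumed from other parts of the library, not declared here:

    #include <cds/init.h>       // assumed: cds::Initialize / cds::Terminate
    #include <cds/gc/hp.h>

    int main()
    {
        cds::Initialize();                  // initialize libcds infrastructure
        {
            cds::gc::HP hpGC;               // construct the Hazard Pointer GC singleton
            cds::gc::HP::thread_gc myGC;    // attach the main thread to the HP GC

            // ... work with HP-based containers here ...
        }
        cds::Terminate();                   // terminate libcds
        return 0;
    }
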
+ */ + class HP + { + public: + /// Native guarded pointer type + typedef gc::hzp::hazard_pointer guarded_pointer; + +#ifdef CDS_CXX11_TEMPLATE_ALIAS_SUPPORT + /// Atomic reference + /** + @headerfile cds/gc/hp.h + */ + template using atomic_ref = CDS_ATOMIC::atomic; + + /// Atomic marked pointer + /** + @headerfile cds/gc/hp.h + */ + template using atomic_marked_ptr = CDS_ATOMIC::atomic; + + /// Atomic type + /** + @headerfile cds/gc/hp.h + */ + template using atomic_type = CDS_ATOMIC::atomic; +#else + template + class atomic_ref: public CDS_ATOMIC::atomic + { + typedef CDS_ATOMIC::atomic base_class; + public: +# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT + atomic_ref() = default; +# else + atomic_ref() + : base_class() + {} +# endif + explicit CDS_CONSTEXPR atomic_ref(T * p) CDS_NOEXCEPT + : base_class( p ) + {} + }; + + template + class atomic_type: public CDS_ATOMIC::atomic + { + typedef CDS_ATOMIC::atomic base_class; + public: +# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT + atomic_type() = default; +# else + atomic_type() CDS_NOEXCEPT + : base_class() + {} +# endif + explicit CDS_CONSTEXPR atomic_type(T const & v) CDS_NOEXCEPT + : base_class( v ) + {} + }; + + template + class atomic_marked_ptr: public CDS_ATOMIC::atomic + { + typedef CDS_ATOMIC::atomic base_class; + public: +# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT + atomic_marked_ptr() CDS_NOEXCEPT_DEFAULTED_( noexcept(base_class()) ) = default; +# else + atomic_marked_ptr() CDS_NOEXCEPT_( noexcept(base_class()) ) + : base_class() + {} +# endif + explicit CDS_CONSTEXPR atomic_marked_ptr(MarkedPtr val) CDS_NOEXCEPT_( noexcept(base_class( val )) ) + : base_class( val ) + {} + explicit CDS_CONSTEXPR atomic_marked_ptr(typename MarkedPtr::value_type * p) CDS_NOEXCEPT_( noexcept(base_class( p )) ) + : base_class( p ) + {} + }; +#endif + + /// Thread GC implementation for internal usage + typedef hzp::ThreadGC thread_gc_impl; + + /// Wrapper for hzp::ThreadGC class + /** + @headerfile cds/gc/hp.h + This class performs automatically attaching/detaching Hazard Pointer GC + for the current thread. + */ + class thread_gc: public thread_gc_impl + { + //@cond + bool m_bPersistent; + //@endcond + public: + + /// Constructor + /** + The constructor attaches the current thread to the Hazard Pointer GC + if it is not yet attached. + The \p bPersistent parameter specifies attachment persistence: + - \p true - the class destructor will not detach the thread from Hazard Pointer GC. + - \p false (default) - the class destructor will detach the thread from Hazard Pointer GC. + */ + thread_gc( + bool bPersistent = false + ) ; //inline in hp_impl.h + + /// Destructor + /** + If the object has been created in persistent mode, the destructor does nothing. + Otherwise it detaches the current thread from Hazard Pointer GC. + */ + ~thread_gc() ; // inline in hp_impl.h + }; + + /// Base for container node + /** + @headerfile cds/gc/hp.h + This struct is empty for Hazard Pointer GC + */ + struct container_node + {}; + + /// Hazard Pointer guard + /** + @headerfile cds/gc/hp.h + This class is a wrapper for hzp::AutoHPGuard. 
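Before moving on to the guard classes, a sketch of the persistent attachment mode of HP::thread_gc described above, intended for code that runs in externally created threads (the callback function is hypothetical):

    #include <cds/gc/hp.h>

    // Called repeatedly from threads created by a host application.
    void plugin_callback()
    {
        // Attaches the current thread to the HP GC only if it is not yet attached;
        // with bPersistent = true the destructor does not detach it, so later
        // callbacks in the same thread find it already attached.
        cds::gc::HP::thread_gc gc( true );

        // ... work with HP-based containers ...
    }
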
+ */ + class Guard: public hzp::AutoHPGuard + { + //@cond + typedef hzp::AutoHPGuard base_class; + //@endcond + + public: + //@cond + Guard() ; // inline in hp_impl.h + //@endcond + + /// Protects a pointer of type \p atomic + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store it + to the HP slot repeatedly until the guard's value equals \p toGuard + */ + template + T protect( CDS_ATOMIC::atomic const& toGuard ) + { + T pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed); + T pRet; + do { + pRet = assign( pCur ); + pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire); + } while ( pRet != pCur ); + return pCur; + } + + /// Protects a converted pointer of type \p atomic + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store result of \p f functor + to the HP slot repeatedly until the guard's value equals \p toGuard. + + The function is useful for intrusive containers when \p toGuard is a node pointer + that should be converted to a pointer to the value type before protecting. + The parameter \p f of type Func is a functor that makes this conversion: + \code + struct functor { + value_type * operator()( T * p ); + }; + \endcode + Really, the result of f( toGuard.load() ) is assigned to the hazard pointer. + */ + template + T protect( CDS_ATOMIC::atomic const& toGuard, Func f ) + { + T pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed); + T pRet; + do { + pRet = pCur; + assign( f( pCur ) ); + pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire); + } while ( pRet != pCur ); + return pCur; + } + + /// Store \p p to the guard + /** + The function equals to a simple assignment the value \p p to guard, no loop is performed. + Can be used for a pointer that cannot be changed concurrently + */ + template + T * assign( T * p ) + { + return base_class::operator =(p); + } + + /// Copy from \p src guard to \p this guard + void copy( Guard const& src ) + { + assign( src.get_native() ); + } + + /// Store marked pointer \p p to the guard + /** + The function equals to a simple assignment of p.ptr(), no loop is performed. + Can be used for a marked pointer that cannot be changed concurrently. + */ + template + T * assign( cds::details::marked_ptr p ) + { + return base_class::operator =( p.ptr() ); + } + + /// Clear value of the guard + void clear() + { + assign( reinterpret_cast(NULL) ); + } + + /// Get the value currently protected + template + T * get() const + { + return reinterpret_cast( get_native() ); + } + + /// Get native hazard pointer stored + guarded_pointer get_native() const + { + return base_class::get(); + } + }; + + /// Array of Hazard Pointer guards + /** + @headerfile cds/gc/hp.h + This class is a wrapper for hzp::AutoHPArray template. + Template parameter \p Count defines the size of HP array. 
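Taken together, the Guard class defined above is typically used like this (a sketch: the node type and the atomic head are hypothetical, and the calling thread is assumed to be attached via HP::thread_gc):

    #include <cds/gc/hp.h>

    struct node {
        int                        nValue;
        CDS_ATOMIC::atomic<node *> pNext;
    };

    CDS_ATOMIC::atomic<node *> g_Head;

    void reader()
    {
        cds::gc::HP::Guard guard;
        node * p = guard.protect( g_Head ); // re-reads g_Head until the hazard pointer matches it
        if ( p ) {
            int v = p->nValue;              // safe: p cannot be reclaimed while it is guarded
            (void) v;
        }
        guard.clear();                      // drop protection when the pointer is no longer needed
    }
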
+ */ + template + class GuardArray: public hzp::AutoHPArray + { + //@cond + typedef hzp::AutoHPArray base_class; + //@endcond + public: + /// Rebind array for other size \p Count2 + template + struct rebind { + typedef GuardArray other ; ///< rebinding result + }; + + public: + //@cond + GuardArray() ; // inline in hp_impl.h + //@endcond + /// Protects a pointer of type \p atomic + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store it + to the slot \p nIndex repeatedly until the guard's value equals \p toGuard + */ + template + T protect(size_t nIndex, CDS_ATOMIC::atomic const& toGuard ) + { + T pRet; + do { + pRet = assign( nIndex, toGuard.load(CDS_ATOMIC::memory_order_acquire) ); + } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_relaxed)); + + return pRet; + } + + /// Protects a pointer of type \p atomic + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store it + to the slot \p nIndex repeatedly until the guard's value equals \p toGuard + + The function is useful for intrusive containers when \p toGuard is a node pointer + that should be converted to a pointer to the value type before guarding. + The parameter \p f of type Func is a functor that makes this conversion: + \code + struct functor { + value_type * operator()( T * p ); + }; + \endcode + Really, the result of f( toGuard.load() ) is assigned to the hazard pointer. + */ + template + T protect(size_t nIndex, CDS_ATOMIC::atomic const& toGuard, Func f ) + { + T pRet; + do { + assign( nIndex, f( pRet = toGuard.load(CDS_ATOMIC::memory_order_acquire) )); + } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_relaxed)); + + return pRet; + } + + /// Store \p to the slot \p nIndex + /** + The function equals to a simple assignment, no loop is performed. + */ + template + T * assign( size_t nIndex, T * p ) + { + base_class::set(nIndex, p); + return p; + } + + /// Store marked pointer \p p to the guard + /** + The function equals to a simple assignment of p.ptr(), no loop is performed. + Can be used for a marked pointer that cannot be changed concurrently. + */ + template + T * assign( size_t nIndex, cds::details::marked_ptr p ) + { + return assign( nIndex, p.ptr() ); + } + + /// Copy guarded value from \p src guard to slot at index \p nIndex + void copy( size_t nIndex, Guard const& src ) + { + assign( nIndex, src.get_native() ); + } + + /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex + void copy( size_t nDestIndex, size_t nSrcIndex ) + { + assign( nDestIndex, get_native( nSrcIndex )); + } + + /// Clear value of the slot \p nIndex + void clear( size_t nIndex) + { + base_class::clear( nIndex ); + } + + /// Get current value of slot \p nIndex + template + T * get( size_t nIndex) const + { + return reinterpret_cast( get_native( nIndex ) ); + } + + /// Get native hazard pointer stored + guarded_pointer get_native( size_t nIndex ) const + { + return base_class::operator[](nIndex).get(); + } + + /// Capacity of the guard array + static CDS_CONSTEXPR size_t capacity() + { + return Count; + } + }; + + public: + /// Initializes hzp::GarbageCollector singleton + /** + The constructor initializes GC singleton with passed parameters. + If GC instance is not exist then the function creates the instance. + Otherwise it does nothing. + + The Michael's HP reclamation schema depends of three parameters: + - \p nHazardPtrCount - hazard pointer count per thread. 
Usually it is small number (up to 10) depending from + the data structure algorithms. By default, if \p nHazardPtrCount = 0, the function + uses maximum of the hazard pointer count for CDS library. + - \p nMaxThreadCount - max count of thread with using Hazard Pointer GC in your application. Default is 100. + - \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than + nHazardPtrCount * nMaxThreadCount . Default is 2 * nHazardPtrCount * nMaxThreadCount . + */ + HP( + size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread + size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application + size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread + hzp::scan_type nScanType = hzp::inplace ///< Scan type (see \ref hzp::scan_type enum) + ) + { + hzp::GarbageCollector::Construct( + nHazardPtrCount, + nMaxThreadCount, + nMaxRetiredPtrCount, + nScanType + ); + } + + /// Terminates GC singleton + /** + The destructor calls \code hzp::GarbageCollector::Destruct( true ) \endcode + */ + ~HP() + { + hzp::GarbageCollector::Destruct( true ); + } + + /// Checks if count of hazard pointer is no less than \p nCountNeeded + /** + If \p bRaiseException is \p true (that is the default), the function raises an exception gc::too_few_hazard_pointers + if \p nCountNeeded is more than the count of hazard pointer per thread. + */ + static bool check_available_guards( size_t nCountNeeded, bool bRaiseException = true ) + { + if ( hzp::GarbageCollector::instance().getHazardPointerCount() < nCountNeeded ) { + if ( bRaiseException ) + throw cds::gc::too_few_hazard_pointers(); + return false; + } + return true; + } + + /// Returns max Hazard Pointer count + size_t max_hazard_count() const + { + return hzp::GarbageCollector::instance().getHazardPointerCount(); + } + + /// Returns max count of thread + size_t max_thread_count() const + { + return hzp::GarbageCollector::instance().getMaxThreadCount(); + } + + /// Returns capacity of retired pointer array + size_t retired_array_capacity() const + { + return hzp::GarbageCollector::instance().getMaxRetiredPtrCount(); + } + + /// Retire pointer \p p with function \p pFunc + /** + The function places pointer \p p to array of pointers ready for removing. + (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it. + Deleting the pointer is the function \p pFunc call. + */ + template + static void retire( T * p, void (* pFunc)(T *) ) ; // inline in hp_impl.h + + /// Retire pointer \p p with functor of type \p Disposer + /** + The function places pointer \p p to array of pointers ready for removing. + (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it. + + Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is: + \code + template + struct disposer { + void operator()( T * p ) ; // disposing operator + }; + \endcode + Since the functor call can happen at any time after \p retire call, additional restrictions are imposed to \p Disposer type: + - it should be stateless functor + - it should be default-constructible + - the result of functor call with argument \p p should not depend on where the functor will be called. 
+ + \par Examples: + Operator \p delete functor: + \code + template + struct disposer { + void operator ()( T * p ) { + delete p; + } + }; + + // How to call GC::retire method + int * p = new int; + + // ... use p in lock-free manner + + cds::gc::HP::retire( p ) ; // place p to retired pointer array of HP GC + \endcode + + Functor based on \p std::allocator : + \code + template > + struct disposer { + template + void operator()( T * p ) { + typedef typename ALLOC::templare rebind::other alloc_t; + alloc_t a; + a.destroy( p ); + a.deallocate( p, 1 ); + } + }; + \endcode + */ + template + static void retire( T * p ) ; // inline in hp_impl.h + + /// Get current scan strategy + /**@anchor hrc_gc_HP_getScanType + See hzp::GarbageCollector::Scan for scan algo description + */ + hzp::scan_type getScanType() const + { + return hzp::GarbageCollector::instance().getScanType(); + } + + /// Set current scan strategy + /** + Scan strategy changing is allowed on the fly. + + About scan strategy see \ref hrc_gc_HP_getScanType "getScanType" + */ + void setScanType( + hzp::scan_type nScanType ///< new scan strategy + ) + { + hzp::GarbageCollector::instance().setScanType( nScanType ); + } + + /// Checks if Hazard Pointer GC is constructed and may be used + static bool isUsed() + { + return hzp::GarbageCollector::isUsed(); + } + + + /// Forced GC cycle call for current thread + /** + Usually, this function should not be called directly. + */ + static void scan() ; // inline in hp_impl.h + + /// Synonym for \ref scan() + static void force_dispose() + { + scan(); + } + }; +}} // namespace cds::gc + +#endif // #ifndef __CDS_GC_HP_DECL_H diff --git a/cds/gc/hp_impl.h b/cds/gc/hp_impl.h new file mode 100644 index 00000000..d2006e37 --- /dev/null +++ b/cds/gc/hp_impl.h @@ -0,0 +1,57 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_HP_IMPL_H +#define __CDS_GC_HP_IMPL_H + +#include +#include + +//@cond +namespace cds { namespace gc { + + inline HP::thread_gc::thread_gc( + bool bPersistent + ) + : m_bPersistent( bPersistent ) + { + if ( !threading::Manager::isThreadAttached() ) + threading::Manager::attachThread(); + } + + inline HP::thread_gc::~thread_gc() + { + if ( !m_bPersistent ) + cds::threading::Manager::detachThread(); + } + + inline HP::Guard::Guard() + : Guard::base_class( cds::threading::getGC() ) + {} + + template + inline HP::GuardArray::GuardArray() + : GuardArray::base_class( cds::threading::getGC() ) + {} + + template + inline void HP::retire( T * p, void (* pFunc)(T *) ) + { + cds::threading::getGC().retirePtr( p, pFunc ); + } + + template + inline void HP::retire( T * p ) + { + cds::threading::getGC().retirePtr( p, cds::details::static_functor::call ); + } + + inline void HP::scan() + { + cds::threading::getGC().scan(); + } + + +}} // namespace cds::gc +//@endcond + +#endif // #ifndef __CDS_GC_HP_IMPL_H diff --git a/cds/gc/hrc.h b/cds/gc/hrc.h new file mode 100644 index 00000000..344ef31f --- /dev/null +++ b/cds/gc/hrc.h @@ -0,0 +1,10 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_HRC_H +#define __CDS_GC_HRC_H + +#include +#include +#include + +#endif // #ifndef __CDS_GC_HRC_H diff --git a/cds/gc/hrc/details/hrc_fwd.h b/cds/gc/hrc/details/hrc_fwd.h new file mode 100644 index 00000000..7d44e77d --- /dev/null +++ b/cds/gc/hrc/details/hrc_fwd.h @@ -0,0 +1,16 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_HRC_SCHEMA_FWD_H +#define __CDS_GC_HRC_SCHEMA_FWD_H + +namespace cds { namespace gc { namespace hrc { + + // forward declaration + class GarbageCollector; + class ThreadGC; + + class ContainerNode; + class Container; +}}} 
+ +#endif // #ifndef __CDS_GC_HRC_SCHEMA_FWD_H diff --git a/cds/gc/hrc/details/hrc_inline.h b/cds/gc/hrc/details/hrc_inline.h new file mode 100644 index 00000000..a116582b --- /dev/null +++ b/cds/gc/hrc/details/hrc_inline.h @@ -0,0 +1,64 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_HRC_SCHEMA_INLINE_H +#define __CDS_GC_HRC_SCHEMA_INLINE_H + +//@cond +namespace cds { namespace gc { namespace hrc { + + //------------------------------------------------------------------- + // Inlines + //------------------------------------------------------------------- + + namespace details { + inline retired_vector::retired_vector( const GarbageCollector& gc ) + : m_nFreeList(0) + , m_arr( gc.getMaxRetiredPtrCount() ) + { + for ( size_t i = 0; i < m_arr.capacity(); ++i ) + m_arr[i].m_nNextFree = i + 1; + m_arr[ m_arr.capacity() - 1 ].m_nNextFree = m_nEndFreeList; + } + + inline thread_descriptor::thread_descriptor( const GarbageCollector& gc ) + : m_hzp( gc.getHazardPointerCount() ) + , m_arrRetired( gc ) + {} + + } // namespace details + + inline ContainerNode::ContainerNode() + : m_bTrace( false ) + , m_bDeleted( false ) + { + CDS_DEBUG_DO( GarbageCollector::instance().dbgNodeConstructed() ; ) + } + + inline ContainerNode::~ContainerNode() + { + assert( m_RC == 0 ); + CDS_DEBUG_DO( GarbageCollector::instance().dbgNodeDestructed() ; ) + } + + inline void GarbageCollector::try_retire( ThreadGC * pThreadGC ) + { + CDS_DEBUG_DO( unsigned int nAttempt = 0 ); + + do { + pThreadGC->cleanUpLocal(); + Scan( pThreadGC ); + HelpScan( pThreadGC ); + + if ( pThreadGC->m_pDesc->m_arrRetired.isFull() ) + CleanUpAll( pThreadGC ); + + // infinite loop? + assert( ++nAttempt <= 3 ); + } while ( pThreadGC->m_pDesc->m_arrRetired.isFull() ); + } + + +} } } // namespace cds::gc::hrc +//@endcond + +#endif // #ifndef __CDS_GC_HRC_SCHEMA_INLINE_H diff --git a/cds/gc/hrc/details/hrc_retired.h b/cds/gc/hrc/details/hrc_retired.h new file mode 100644 index 00000000..79262891 --- /dev/null +++ b/cds/gc/hrc/details/hrc_retired.h @@ -0,0 +1,193 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_HRC_SCHEMA_RETIRED_H +#define __CDS_GC_HRC_SCHEMA_RETIRED_H + +#include +#include +#include +#include + +namespace cds { namespace gc { namespace hrc { + namespace details { + + /// Pointer to function to free (destruct and deallocate) retired pointer of specific type + typedef gc::details::free_retired_ptr_func free_retired_ptr_func; + + /// Retired node descriptor + struct retired_node { + CDS_ATOMIC::atomic m_pNode ; ///< node to destroy + free_retired_ptr_func m_funcFree ; ///< pointer to the destructor function + size_t m_nNextFree ; ///< Next free item in retired array + CDS_ATOMIC::atomic m_nClaim ; ///< Access to reclaimed node + CDS_ATOMIC::atomic m_bDone ; ///< the record is in work (concurrent access flag) + + /// Default ctor + retired_node() + : m_pNode( null_ptr() ) + , m_funcFree( null_ptr() ) + , m_nNextFree(0) + , m_nClaim(0) + , m_bDone( false ) + {} + + /// Assignment ctor + retired_node( + ContainerNode * pNode ///< Node to retire + ,free_retired_ptr_func func ///< Destructor function + ) + : m_pNode( pNode ) + , m_funcFree( func ) + , m_nClaim(0) + , m_bDone( false ) + {} + + /// Compares two \ref retired_node + static bool Less( const retired_node& p1, const retired_node& p2 ) + { + return p1.m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) < p2.m_pNode.load( CDS_ATOMIC::memory_order_relaxed ); + } + + /// Assignment operator + retired_node& set( ContainerNode * pNode, free_retired_ptr_func func ) + { + m_bDone.store( false, 
CDS_ATOMIC::memory_order_relaxed ); + m_nClaim.store( 0, CDS_ATOMIC::memory_order_relaxed ); + m_funcFree = func; + m_pNode.store( pNode, CDS_ATOMIC::memory_order_release ); + CDS_COMPILER_RW_BARRIER; + return *this; + } + + /// Invokes destructor function for the pointer + void free() + { + assert( m_funcFree != null_ptr() ); + m_funcFree( m_pNode.load( CDS_ATOMIC::memory_order_relaxed )); + } + }; + + /// Compare two retired node + /** + This comparison operator is needed for sorting pointers on + deallocation step + */ + static inline bool operator <( const retired_node& p1, const retired_node& p2 ) + { + return retired_node::Less( p1, p2 ); + } + + /// Array of ready for destroying pointers + /** + The array object is belonged to one thread: only owner thread may write to this array, + any other thread can read one. + */ + class retired_vector + { + typedef cds::details::bounded_array vector_type ; ///< type of vector of retired pointer (implicit CDS_DEFAULT_ALLOCATOR dependency) + + //@cond + static const size_t m_nEndFreeList = size_t(0) - 1 ; ///< End of free list + //@endcond + size_t m_nFreeList ; ///< Index of first free item in m_arr + vector_type m_arr ; ///< Array of retired pointers (implicit \ref CDS_DEFAULT_ALLOCATOR dependence) + + public: + /// Iterator over retired pointer vector + typedef vector_type::iterator iterator; + /// Const iterator type + typedef vector_type::const_iterator const_iterator; + + public: + /// Ctor + retired_vector( const GarbageCollector& mgr ) ; // inline + ~retired_vector() + {} + + ///@anchor hrc_gc_retired_vector_capacity Capacity (max available size) of array + size_t capacity() const + { + return m_arr.capacity(); + } + + /// Returns count of retired node in array. This function is intended for debug purposes only + size_t retiredNodeCount() const + { + size_t nCount = 0; + const size_t nCapacity = capacity(); + for ( size_t i = 0; i < nCapacity; ++i ) { + if ( m_arr[i].m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) != null_ptr() ) + ++nCount; + } + return nCount; + } + + /// Push a new item into the array + void push( ContainerNode * p, free_retired_ptr_func pFunc ) + { + assert( !isFull()); + + size_t n = m_nFreeList; + assert( m_arr[n].m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) == null_ptr() ); + m_nFreeList = m_arr[n].m_nNextFree; + CDS_DEBUG_DO( m_arr[n].m_nNextFree = m_nEndFreeList ; ) + m_arr[n].set( p, pFunc ); + } + + /// Pops the item by index \p n from the array + void pop( size_t n ) + { + assert( n < capacity() ); + m_arr[n].m_pNode.store( null_ptr(), CDS_ATOMIC::memory_order_release ); + m_arr[n].m_nNextFree = m_nFreeList; + m_nFreeList = n; + } + + /// Checks if array is full + bool isFull() const + { + return m_nFreeList == m_nEndFreeList; + } + + /// Get the item by index \p i + retired_node& operator []( size_t i ) + { + assert( i < capacity() ); + return m_arr[i]; + } + + /// Returns a random-access iterator to the first element in the retired pointer vector + /** + If the vector is empty, end() == begin(). + */ + iterator begin() + { + return m_arr.begin(); + } + + /// Const version of begin() + const_iterator begin() const + { + return m_arr.begin(); + } + + /// A random-access iterator to the end of the vector object. + /** + If the vector is empty, end() == begin(). 
+ */ + iterator end() + { + return m_arr.end(); + } + + /// Const version of end() + const_iterator end() const + { + return m_arr.end(); + } + }; + + } // namespace details +}}} // namespace cds::gc::hrc + +#endif // #ifndef __CDS_GC_HRC_SCHEMA_RETIRED_H diff --git a/cds/gc/hrc/gc_fwd.h b/cds/gc/hrc/gc_fwd.h new file mode 100644 index 00000000..09170552 --- /dev/null +++ b/cds/gc/hrc/gc_fwd.h @@ -0,0 +1,15 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_HRC_SCHEMA_GC_FWD_H +#define __CDS_GC_HRC_SCHEMA_GC_FWD_H + +#include + +//@cond +namespace cds { namespace gc { namespace hrc { + // Forward declaration + class GC; +}}} // namespace cds::gc::hrc +//@endcond + +#endif // #ifndef __CDS_GC_HRC_SCHEMA_GC_FWD_H diff --git a/cds/gc/hrc/hrc.h b/cds/gc/hrc/hrc.h new file mode 100644 index 00000000..9fe760f7 --- /dev/null +++ b/cds/gc/hrc/hrc.h @@ -0,0 +1,690 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_HRC_HRC_H +#define __CDS_GC_HRC_HRC_H + +/* + Editions: + 2008.03.08 Maxim.Khiszinsky Created +*/ + +#include +#include +#include + +#include +#include + +#include + +#include + +#if CDS_COMPILER == CDS_COMPILER_MSVC +# pragma warning(push) +// warning C4251: 'cds::gc::hzp::GarbageCollector::m_pListHead' : class 'cds::cxx11_atomics::atomic' +// needs to have dll-interface to be used by clients of class 'cds::gc::hzp::GarbageCollector' +# pragma warning(disable: 4251) +#endif + + +namespace cds { namespace gc { + + // forwards + class HRC; + + /// Gidenstam's memory reclamation schema (HRC) + /** + + \par Sources: + - [2006] A.Gidenstam "Algorithms for synchronization and consistency + in concurrent system services", Chapter 5 "Lock-Free Memory Reclamation" + Thesis for the degree of Doctor of Philosophy + - [2005] Anders Gidenstam, Marina Papatriantafilou and Philippas Tsigas "Allocating + memory in a lock-free manner", Proceedings of the 13th Annual European + Symposium on Algorithms (ESA 2005), Lecture Notes in Computer + Science Vol. 3669, pages 229 – 242, Springer-Verlag, 2005 + + + The \p %cds::gc::hrc namespace and its members are internal representation of the GC and should not be used directly. + Use \p cds::gc::HRC class in your code. + + This reclamation schema combines Michael's Hazard Pointer schema (see \p cds::gc::hzp) + for deferred safe reclamation of unused objects and the reference counting + for controlling lifetime of the objects. + + HRC garbage collector is a singleton. The main user-level part of HRC schema is + GC class and its nested classes. Before use any HRC-related class you must initialize HRC garbage collector + by contructing \p %cds::gc::HRC object in beginning of your main(). + See \p cds::gc::HRC class for explanation. + */ + namespace hrc { + + /// Base class for all HRC-based container's node + /** + This interface is placed to the separate class since in the presence of a garbage collector + the lifetime of the node is greater than lifetime of its container. + Reclaimed node may be physically deleted later than its container. + So, the ContainerNode must be smarter than the usual. 
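To make the interface declared next concrete, here is a hypothetical leaf node that owns no reference-counted links, so the two GC callbacks required by ContainerNode are trivially empty (real containers unlink their counted references in these callbacks, as the pseudocode in the class documentation shows):

    #include <cds/gc/hrc/hrc.h>

    struct my_node: public cds::gc::hrc::ContainerNode
    {
        int m_nData;

    protected:
        // No reference-counted links to repair or release in this sketch.
        virtual void cleanUp( cds::gc::hrc::ThreadGC * /*pGC*/ )
        {}
        virtual void terminate( cds::gc::hrc::ThreadGC * /*pGC*/, bool /*bConcurrent*/ )
        {}
    };
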
+ */ + class ContainerNode + { + protected: + + friend class GarbageCollector; + friend class ThreadGC; + friend class gc::HRC; + + unsigned_ref_counter m_RC ; ///< reference counter + CDS_ATOMIC::atomic m_bTrace ; ///< \p true - node is tracing by Scan + CDS_ATOMIC::atomic m_bDeleted ; ///< \p true - node is deleted + + protected: + //@cond + ContainerNode() ; // inline, see hrc_inline.h + virtual ~ContainerNode() ; // inline, see hrc_inline.h + //@endcond + + public: + /// Returns count of reference for the node + unsigned_ref_counter::ref_counter_type getRefCount() const CDS_NOEXCEPT + { + return m_RC.value(); + } + + /// Increments the reference counter of the node + void incRefCount() CDS_NOEXCEPT + { + m_RC.inc(); + } + + /// Decrements the reference counter of the node. Returns \p true if ref counter is 0. + bool decRefCount() CDS_NOEXCEPT + { + return m_RC.dec(); + } + + /// Returns the mark whether the node is deleted + bool isDeleted() const CDS_NOEXCEPT + { + return m_bDeleted.load( CDS_ATOMIC::memory_order_acquire ); + } + + protected: + //@cond + void clean( CDS_ATOMIC::memory_order order ) CDS_NOEXCEPT + { + m_bDeleted.store( false, order ); + m_bTrace.store( false, order ); + } + //@endcond + + protected: // GC interface + /** + [Gidenstam 2006]: "The procedure \p CleanUpNode will make sure that all claimed references from + the links of the given node will only point to active nodes, thus removing redundant + passages through an arbitrary number of deleted nodes" + + The pseudocode of this method must be like following: + \code + void cleanUp( ThreadGC * pGC ) + for all x where link[x] of node is reference-counted do + retry: + node1 := link[x]; + if node1 != NULL and node1.m_bDeleted then + node2 := node1->link[x]; + pGC->CASRef( this->link[x], node1, node2 ); + pGC->releaseRef( node2 ); + pGC->releaseRef( node1 ); + goto retry; + pGC->releaseRef(node1); + \endcode + + Be aware to use hazard pointers inside implementation of this method. cleanUp is called from + the container's method when deleting the nodes. However, some hazard pointers may be occupied + by container's method. You should allocate new hazard pointers inside \p cleanUp method, for example: + \code + gc::hrc::AutoHPArray<2> hpArr( *pGC ); + \endcode + */ + virtual void cleanUp( ThreadGC * pGC ) = 0; + + /** + [Gidenstam 2006]: "The procedure \p TerminateNode will make sure that none of the links in the + given node will have any claim on any other node. TerminateNode is called on + a deleted node when there are no claims from any other node or thread to the + node" + + The pseudocode of this method must be like following: + \code + void terminate( ThreadGC * pGC, bool bConcurrent) + if !bConcurrent + for all this->link where link is reference-counted do + link := NULL; + else + for all this->link where link is reference-counted do + repeat node1 := link; + until pGC->CASRef(link,node1,NULL); + \endcode + */ + virtual void terminate( ThreadGC * pGC, bool bConcurrent ) = 0; + + public: + /// Method to destroy (deallocate) node. Depends on node's allocator + //virtual void destroy() = 0; + }; + + //@cond + /// HRC GC implementation details + namespace details { + + /// Hazard pointer guard + typedef gc::hzp::details::HPGuardT HPGuard; + + /// Array of hazard pointers. 
+ /** + This is wrapper for cds::gc::hzp::details::HPArray class + */ +#ifdef CDS_CXX11_TEMPLATE_ALIAS_SUPPORT + template using HPArray = gc::hzp::details::HPArrayT; +#else + template + class HPArray: public gc::hzp::details::HPArrayT + {}; +#endif + + /// HP record of the thread + /** + This structure is single writer - multiple reader type. The writer is the thread owned the record + */ + struct thread_descriptor { + typedef ContainerNode * hazard_ptr ; ///< base type of hazard pointer + + hzp::details::HPAllocator m_hzp ; ///< array of hazard pointers. Implicit \ref CDS_DEFAULT_ALLOCATOR dependence + details::retired_vector m_arrRetired ; ///< array of retired pointers + + //@cond + thread_descriptor( const GarbageCollector& HzpMgr ) ; // inline + ~thread_descriptor() + {} + //@endcond + + /// clear all hazard pointers + void clear() + { + m_hzp.clear(); + } + }; + } // namespace details + //@endcond + + /// Gidenstam's Garbage Collector + /** + This GC combines Hazard Pointers (HP) reclamation method by Michael's and the well-known reference counting + reclamation schema. The HP method is light-weight algorithm guarding local references only. Reference counting + schema is harder than HP with relation to the performance but can guard global references too. + Using Gidenstam's GC it can be possible to safely introduce to the lock-free data structures + very useful concepts like iterators. + + GarbageCollector is the singleton. + */ + class CDS_EXPORT_API GarbageCollector + { + public: + typedef cds::atomicity::event_counter event_counter ; ///< event counter type + + /// GC internal statistics + struct internal_state { + size_t nHPCount ; ///< HP count per thread (const) + size_t nMaxThreadCount ; ///< Max thread count (const) + size_t nMaxRetiredPtrCount ; ///< Max retired pointer count per thread (const) + size_t nHRCRecSize ; ///< Size of HRC record, bytes (const) + + size_t nHRCRecAllocated ; ///< Count of HRC record allocations + size_t nHRCRecUsed ; ///< Count of HRC record used + size_t nTotalRetiredPtrCount ; ///< Current total count of retired pointers + size_t nRetiredPtrInFreeHRCRecs; ///< Count of retired pointer in free (unused) HP records + + + event_counter::value_type evcAllocHRCRec ; ///< Event count of thread descriptor allocation + event_counter::value_type evcRetireHRCRec ; ///< Event count of thread descriptor reclamation + event_counter::value_type evcAllocNewHRCRec ; ///< Event count of new thread descriptor allocation + event_counter::value_type evcDeleteHRCRec ; ///< Event count of thread descriptor deletion + event_counter::value_type evcScanCall ; ///< Number of calls Scan + event_counter::value_type evcHelpScanCalls ; ///< Number of calls HelpScan + event_counter::value_type evcCleanUpAllCalls ; ///< Number of calls CleanUpAll + event_counter::value_type evcDeletedNode ; ///< Node deletion event counter + event_counter::value_type evcScanGuarded ; ///< Count of retired nodes that could not be deleted on Scan phase + event_counter::value_type evcScanClaimGuarded ; ///< Count of retired node that could not be deleted on Scan phase because of m_nClaim != 0 + +#ifdef CDS_DEBUG + event_counter::value_type evcNodeConstruct ; ///< Count of constructed ContainerNode + event_counter::value_type evcNodeDestruct ; ///< Count of destructed ContainerNode +#endif + }; + + /// "Global GC object is NULL" exception + CDS_DECLARE_EXCEPTION( HRCGarbageCollectorEmpty, "Global cds::gc::hrc::GarbageCollector is NULL" ); + + /// Not enough required Hazard Pointer count + 
CDS_DECLARE_EXCEPTION( HRCTooMany, "Not enough required Hazard Pointer count" ); + + private: + /// Internal statistics by events + struct statistics { + event_counter m_AllocHRCThreadDesc ; ///< Event count of thread descriptor allocation + event_counter m_RetireHRCThreadDesc ; ///< Event count of thread descriptor reclamation + event_counter m_AllocNewHRCThreadDesc ; ///< Event count of new thread descriptor allocation + event_counter m_DeleteHRCThreadDesc ; ///< Event count of deletion of thread descriptor + event_counter m_ScanCalls ; ///< Number of calls Scan + event_counter m_HelpScanCalls ; ///< Number of calls HelpScan + event_counter m_CleanUpAllCalls ; ///< Number of calls CleanUpAll + + event_counter m_DeletedNode ; ///< Node deletion event counter + event_counter m_ScanGuarded ; ///< Count of retired nodes that could not be deleted on Scan phase + event_counter m_ScanClaimGuarded ; ///< Count of retired node that could not be deleted on Scan phase because of m_nClaim != 0 + +# ifdef CDS_DEBUG + event_counter m_NodeConstructed ; ///< Count of ContainerNode constructed + event_counter m_NodeDestructed ; ///< Count of ContainerNode destructed +# endif + }; + + /// HRC control structure of global thread list + struct thread_list_node: public details::thread_descriptor + { + thread_list_node * m_pNext ; ///< next list record + ThreadGC * m_pOwner ; ///< Owner of record + CDS_ATOMIC::atomic m_idOwner ; ///< Id of thread owned; 0 - record is free + bool m_bFree ; ///< Node is help-scanned + + //@cond + thread_list_node( const GarbageCollector& HzpMgr ) + : thread_descriptor( HzpMgr ), + m_pNext(null_ptr()), + m_pOwner( null_ptr() ), + m_idOwner( cds::OS::nullThreadId() ), + m_bFree( false ) + {} + + ~thread_list_node() + { + assert( m_pOwner == null_ptr() ); + assert( m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == cds::OS::nullThreadId() ); + } + //@endcond + }; + + private: + CDS_ATOMIC::atomic m_pListHead ; ///< Head of thread list + + static GarbageCollector * m_pGC ; ///< HRC garbage collector instance + + statistics m_Stat ; ///< Internal statistics + bool m_bStatEnabled ; ///< @a true - accumulate internal statistics + + const size_t m_nHazardPointerCount ; ///< max count of thread's hazard pointer + const size_t m_nMaxThreadCount ; ///< max count of thread + const size_t m_nMaxRetiredPtrCount ; ///< max count of retired ptr per thread + + private: + //@cond + GarbageCollector( + size_t nHazardPtrCount, ///< number of hazard pointers + size_t nMaxThreadCount, ///< max number of threads + size_t nRetiredNodeArraySize ///< size of array of retired node + ); + ~GarbageCollector(); + //@endcond + + /// Allocates new HRC control structure from the heap (using operator new) + thread_list_node * newHRCThreadDesc(); + + /// Deletes \p pNode control structure + void deleteHRCThreadDesc( thread_list_node * pNode ); + + /// Clears retired nodes of \p pNode control structure + void clearHRCThreadDesc( thread_list_node * pNode ); + + /// Finds HRC control structure for current thread + thread_list_node * getHRCThreadDescForCurrentThread() const; + + public: + /// Create global instance of GarbageCollector + static void CDS_STDCALL Construct( + size_t nHazardPtrCount = 0, ///< number of hazard pointers + size_t nMaxThreadCount = 0, ///< max threads count + size_t nMaxNodeLinkCount = 0, ///< max number of links a @ref ContainerNode can contain + size_t nMaxTransientLinks = 0 ///< max number of links in live nodes that may transiently point to a deleted node + ); + + /// Destroy global 
instance of GarbageCollector + static void CDS_STDCALL Destruct(); + + /// Get global instance of GarbageCollector + static GarbageCollector& instance() + { + if ( !m_pGC ) + throw HRCGarbageCollectorEmpty(); + return *m_pGC; + } + + /// Checks if global GC object is constructed and may be used + static bool isUsed() + { + return m_pGC != null_ptr(); + } + + /// Get max count of hazard pointers as defined in @ref Construct call + size_t getHazardPointerCount() const + { + return m_nHazardPointerCount; + } + + /// Get max thread count as defined in @ref Construct call + size_t getMaxThreadCount() const + { + return m_nMaxThreadCount; + } + + /// Get max retired pointers count. It is calculated by the parameters of @ref Construct call + size_t getMaxRetiredPtrCount() const + { + return m_nMaxRetiredPtrCount; + } + + /// Get internal statistics + internal_state& getInternalState( internal_state& stat) const; + + /// Check if statistics enabled + bool isStatisticsEnabled() const + { + return m_bStatEnabled; + } + + /// Enable internal statistics + bool enableStatistics( bool bEnable ) + { + bool bCurEnabled = m_bStatEnabled; + m_bStatEnabled = bEnable; + return bCurEnabled; + } + + /// Checks that required hazard pointer count \p nRequiredCount is less or equal then max hazard pointer count + /** + If \p nRequiredCount > getHazardPointerCount() then the exception HZPTooMany is thrown + */ + static void checkHPCount( unsigned int nRequiredCount ) + { + if ( instance().getHazardPointerCount() < nRequiredCount ) + throw HRCTooMany(); + } + + public: // Internals for threads + + /// Allocates HRC thread descriptor (thread interface) + details::thread_descriptor * allocateHRCThreadDesc( ThreadGC * pThreadGC ); + + /// Retires HRC thread descriptor (thread interface) + void retireHRCThreadDesc( details::thread_descriptor * pRec ); + + /// The main method of GC + /** + The procedure searches through all not yet reclaimed nodes deleted by this thread + and reclaim only those that does not have any matching hazard pointers and do not have any + counted references from any links inside of nodes. + @a Scan is called in context of thread owned \p pRec. + */ + void Scan( ThreadGC * pThreadGC ); + + /// Manage free thread_descriptor records and move all retired pointers to \p pThreadGC + void HelpScan( ThreadGC * pThreadGC ); + + /// Global clean up + /** + The procedure try to remove redundant claimed references from links in deleted nodes + that has been deleted by any thread. \p pThreadGC - ThreadGC of calling thread + */ + void CleanUpAll( ThreadGC * pThreadGC ); + + //@cond + void try_retire( ThreadGC * pThreadGC ) ; // inline in hrc_inline.h + //@endcond + +# ifdef CDS_DEBUG + public: + //@cond + void dbgNodeConstructed() { ++m_Stat.m_NodeConstructed; } + void dbgNodeDestructed() { ++m_Stat.m_NodeDestructed; } + //@endcond +# endif + + }; + + class AutoHPGuard; + + /// Thread's Garbage collector + /** + To use HRC reclamation schema each thread object must be linked with the object of ThreadGC class + that interacts with GarbageCollector global object. The linkage is performed by calling cds::threading \p Manager::attachThread() + on the start of each thread that uses HRC GC. Before terminating the thread linked to HRC GC it is necessary to call + cds::threading \p Manager::detachThread(). 
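A sketch of what that linkage looks like in a thread entry point, using the attach/detach calls named above (the header path and the work in between are assumptions):

    #include <cds/threading/model.h>   // assumed location of cds::threading::Manager

    void worker_thread()
    {
        cds::threading::Manager::attachThread();   // link this thread to the HRC GC

        // ... operate on cds::gc::HRC based containers ...

        cds::threading::Manager::detachThread();   // unlink before the thread terminates
    }
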
+ */ + class ThreadGC: cds::details::noncopyable + { + GarbageCollector& m_gc ; ///< master garbage collector + details::thread_descriptor * m_pDesc ; ///< descriptor of GC data for the thread + + friend class GarbageCollector; + + public: + //@cond + ThreadGC() + : m_gc( GarbageCollector::instance() ) + , m_pDesc( null_ptr() ) + {} + ~ThreadGC() + { + fini(); + } + //@endcond + + /// Checks if thread GC is initialized + bool isInitialized() const { return m_pDesc != null_ptr() ; } + + /// Initialization. Multiple calls is allowed + void init() + { + if ( !m_pDesc ) + m_pDesc = m_gc.allocateHRCThreadDesc( this ); + } + + /// Finalization. Multiple calls is allowed + void fini() + { + if ( m_pDesc ) { + cleanUpLocal(); + m_gc.Scan( this ); + details::thread_descriptor * pRec = m_pDesc; + m_pDesc = null_ptr(); + if ( pRec ) + m_gc.retireHRCThreadDesc( pRec ); + } + } + public: // HRC garbage collector methods + + /// Initializes HP guard \p guard + details::HPGuard& allocGuard() + { + assert( m_pDesc != null_ptr() ); + return m_pDesc->m_hzp.alloc(); + } + + /// Frees HP guard \p guard + void freeGuard( details::HPGuard& guard ) + { + assert( m_pDesc != null_ptr() ); + m_pDesc->m_hzp.free( guard ); + } + + /// Initializes HP guard array \p arr + template + void allocGuard( details::HPArray& arr ) + { + assert( m_pDesc != null_ptr() ); + m_pDesc->m_hzp.alloc( arr ); + } + + /// Frees HP guard array \p arr + template + void freeGuard( details::HPArray& arr ) + { + assert( m_pDesc != null_ptr() ); + m_pDesc->m_hzp.free( arr ); + } + + /// Retire (deferred delete) node \p pNode guarded by \p hp hazard pointer + void retireNode( ContainerNode * pNode, details::HPGuard& hp, details::free_retired_ptr_func pFunc ) + { + assert( !pNode->m_bDeleted.load( CDS_ATOMIC::memory_order_relaxed ) ); + assert( pNode == hp ); + + retireNode( pNode, pFunc ); + hp.clear(); + } + + /// Retire (deferred delete) node \p pNode. Do not use this function directly! + void retireNode( ContainerNode * pNode, details::free_retired_ptr_func pFunc ) + { + assert( !pNode->m_bDeleted.load( CDS_ATOMIC::memory_order_relaxed ) ); + + pNode->m_bDeleted.store( true, CDS_ATOMIC::memory_order_release ); + pNode->m_bTrace.store( false, CDS_ATOMIC::memory_order_release ); + + m_pDesc->m_arrRetired.push( pNode, pFunc ); + + if ( m_pDesc->m_arrRetired.isFull() ) + m_gc.try_retire( this ); + } + + //@cond + void scan() + { + m_gc.try_retire( this ); + } + //@endcond + + protected: + /// The procedure will try to remove redundant claimed references from link in deleted nodes that has been deleted by this thread + void cleanUpLocal() + { + details::retired_vector::iterator itEnd = m_pDesc->m_arrRetired.end(); + for ( details::retired_vector::iterator it = m_pDesc->m_arrRetired.begin(); it != itEnd; ++it ) { + details::retired_node& node = *it; + ContainerNode * pNode = node.m_pNode.load(CDS_ATOMIC::memory_order_acquire); + if ( pNode && !node.m_bDone.load(CDS_ATOMIC::memory_order_acquire) ) + pNode->cleanUp( this ); + } + } + }; + + /// Auto HPGuard. + class AutoHPGuard + { + //@cond + details::HPGuard& m_hp ; ///< hazard pointer + ThreadGC& m_mgr ; ///< Thread GC. 
+ //@endcond + public: + typedef details::HPGuard::hazard_ptr hazard_ptr ; ///< Hazard pointer type + + public: + /// Allocates HP guard from \p mgr + AutoHPGuard( ThreadGC& mgr ) + : m_hp( mgr.allocGuard() ) + , m_mgr( mgr ) + {} + + /// Allocates HP guard from \p mgr and protects the pointer \p p of type \p T + template + AutoHPGuard( ThreadGC& mgr, T * p ) + : m_hp( mgr.allocGuard() ) + , m_mgr( mgr ) + { + m_hp = p; + } + + /// Frees HP guard + ~AutoHPGuard() + { + m_mgr.freeGuard( m_hp ); + } + + /// Returns thread GC + ThreadGC& getGC() const CDS_NOEXCEPT + { + return m_mgr; + } + + //@cond + template + T * operator =( T * p ) CDS_NOEXCEPT + { + return m_hp = p; + } + //@endcond + + //@cond + hazard_ptr get() const CDS_NOEXCEPT + { + return m_hp; + } + //@endcond + + /// Clears the hazard pointer + void clear() CDS_NOEXCEPT + { + m_hp.clear(); + } + }; + + /// Auto-managed array of hazard pointers + /** + This class is wrapper around gc::hzp::details::HPArray class. + */ + template + class AutoHPArray: public details::HPArray + { + ThreadGC& m_mgr ; ///< Thread GC + + public: + /// Allocates array of HP guard from \p mgr + AutoHPArray( ThreadGC& mgr ) + : m_mgr( mgr ) + { + mgr.allocGuard( *this ); + } + + /// Frees array of HP guard + ~AutoHPArray() + { + m_mgr.freeGuard( *this ); + } + + /// Returns thread GC + ThreadGC& getGC() const + { + return m_mgr; + } + }; + + + } // namespace hrc +}} // namespace cds::gc + +#include + +#if CDS_COMPILER == CDS_COMPILER_MSVC +# pragma warning(pop) +#endif + +#endif // #ifndef __CDS_GC_HRC_HRC_H diff --git a/cds/gc/hrc_decl.h b/cds/gc/hrc_decl.h new file mode 100644 index 00000000..8c1c6260 --- /dev/null +++ b/cds/gc/hrc_decl.h @@ -0,0 +1,840 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_HRC_DECL_H +#define __CDS_GC_HRC_DECL_H + +#include +#include + +namespace cds { namespace gc { + + /// Gidenstam's garbage collector + /** @ingroup cds_garbage_collector + @headerfile cds/gc/hrc.h + + This class is a wrapper for Gidenstam's memory reclamation schema (HRC - Hazard pointer + Reference Counting) + internal implementation. + + Sources: + - [2006] A.Gidenstam "Algorithms for synchronization and consistency + in concurrent system services", Chapter 5 "Lock-Free Memory Reclamation" + Thesis for the degree of Doctor of Philosophy + - [2005] Anders Gidenstam, Marina Papatriantafilou and Philippas Tsigas "Allocating + memory in a lock-free manner", Proceedings of the 13th Annual European + Symposium on Algorithms (ESA 2005), Lecture Notes in Computer + Science Vol. 3669, pages 229 – 242, Springer-Verlag, 2005 + + Note that HRC schema does not support cyclic references that significantly limits the applicability of this GC. + +

+        \par Usage

+ In your \p main function you declare a object of class cds::gc::HRC. This declaration + initializes internal hrc::GarbageCollector singleton. + \code + #include // for cds::Initialize and cds::Terminate + #include + + int main(int argc, char** argv) + { + // Initialize libcds + cds::Initialize(); + + { + // Initialize HRC singleton + cds::gc::HRC hpGC(); + + // Some useful work + ... + } + + // Terminate libcds + cds::Terminate(); + } + \endcode + + Each thread that uses cds::gc::HRC -based containers must be attached to HRC + singleton. To make attachment you should declare a object of class HRC::thread_gc: + \code + #include + + int myThreadEntryPoint() + { + // Attach the thread to HRC singleton + cds::gc::HRC::thread_gc myThreadGC(); + + // Do some work + ... + + // The destructor of myThreadGC object detaches the thread from HRC singleton + } + \endcode + + In some cases, you should work in a external thread. For example, your application + is a plug-in for a server that calls your code in the threads that has been created by server. + In this case, you may use persistent mode of HRC::thread_gc. In this mode, the thread attaches + to the HRC singleton only if it is not yet attached and never call detaching: + \code + #include + + int myThreadEntryPoint() + { + // Attach the thread in persistent mode + cds::gc::HRC::thread_gc myThreadGC( true ); + + // Do some work + ... + + // The destructor of myThreadGC object does NOT detach the thread from HRC singleton + } + \endcode + + */ + class HRC + { + public: + + /// Thread GC implementation for internal usage + typedef hrc::ThreadGC thread_gc_impl; + + /// Wrapper for hrc::ThreadGC class + /** + @headerfile cds/gc/hrc.h + This class performs automatically attaching/detaching Gidenstam's GC + for the current thread. + */ + class thread_gc: public thread_gc_impl + { + //@cond + bool m_bPersistent; + //@endcond + public: + /// Constructor + /** + The constructor attaches the current thread to the Gidenstam's GC + if it is not yet attached. + The \p bPersistent parameter specifies attachment persistence: + - \p true - the class destructor will not detach the thread from Gidenstam's GC. + - \p false (default) - the class destructor will detach the thread from Gidenstam's GC. + */ + thread_gc( + bool bPersistent = false + ) ; // inline in hrc_impl.h + + /// Destructor + /** + If the object has been created in persistent mode, the destructor does nothing. + Otherwise it detaches the current thread from HRC GC. 
+ */ + ~thread_gc() ; // inline in hrc_impl.h + }; + + ///@anchor hrc_gc_HRC_container_node Base for container node + typedef hrc::ContainerNode container_node; + + /// Native hazard pointer type + typedef container_node * guarded_pointer; + + /// Atomic reference + /** + @headerfile cds/gc/hrc.h + */ + template + class atomic_ref: protected CDS_ATOMIC::atomic + { + //@cond + typedef CDS_ATOMIC::atomic base_class; + //@endcond + public: + //@cond +# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT + atomic_ref() = default; +# else + atomic_ref() CDS_NOEXCEPT + : base_class() + {} +# endif + explicit CDS_CONSTEXPR atomic_ref(T * p) CDS_NOEXCEPT + : base_class( p ) + {} + //@endcond + + /// Read reference value + T * load( CDS_ATOMIC::memory_order order ) const CDS_NOEXCEPT + { + return base_class::load( order ); + } + //@cond + T * load( CDS_ATOMIC::memory_order order ) const volatile CDS_NOEXCEPT + { + return base_class::load( order ); + } + //@endcond + + /// Store new value to reference + void store( T * pNew, CDS_ATOMIC::memory_order order ) CDS_NOEXCEPT + { + before_store( pNew ); + T * pOld = base_class::exchange( pNew, order ); + after_store( pOld, pNew ); + } + //@cond + void store( T * pNew, CDS_ATOMIC::memory_order order ) volatile CDS_NOEXCEPT + { + before_store( pNew ); + T * pOld = base_class::exchange( pNew, order ); + after_store( pOld, pNew ); + } + //@endcond + + /// Updates atomic reference from current value \p pOld to new value \p pNew (strong CAS) + /** + May be used when concurrent updates are possible + + \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type + */ + bool compare_exchange_strong( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) CDS_NOEXCEPT + { + before_cas( pNew ); + bool bSuccess = base_class::compare_exchange_strong( pOld, pNew, mo_success, mo_fail ); + after_cas( bSuccess, pOld, pNew ); + return bSuccess; + } + //@cond + bool compare_exchange_strong( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) volatile CDS_NOEXCEPT + { + before_cas( pNew ); + bool bSuccess = base_class::compare_exchange_strong( pOld, pNew, mo_success, mo_fail ); + after_cas( bSuccess, pOld, pNew ); + return bSuccess; + } + bool compare_exchange_strong( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success ) CDS_NOEXCEPT + { + return compare_exchange_strong( pOld, pNew, mo_success, CDS_ATOMIC::memory_order_relaxed ); + } + bool compare_exchange_strong( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success ) volatile CDS_NOEXCEPT + { + return compare_exchange_strong( pOld, pNew, mo_success, CDS_ATOMIC::memory_order_relaxed ); + } + //@endcond + + /// Updates atomic reference from current value \p pOld to new value \p pNew (weak CAS) + /** + May be used when concurrent updates are possible + + \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type + */ + bool compare_exchange_weak( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) CDS_NOEXCEPT + { + before_cas( pNew ); + bool bSuccess = base_class::compare_exchange_weak( pOld, pNew, mo_success, mo_fail ); + after_cas( bSuccess, pOld, pNew ); + return bSuccess; + } + //@cond + bool compare_exchange_weak( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) volatile CDS_NOEXCEPT + { + before_cas( pNew ); + bool bSuccess = base_class::compare_exchange_weak( pOld, pNew, mo_success, mo_fail ); + after_cas( 
bSuccess, pOld, pNew ); + return bSuccess; + } + bool compare_exchange_weak( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success ) CDS_NOEXCEPT + { + return compare_exchange_weak( pOld, pNew, mo_success, CDS_ATOMIC::memory_order_relaxed ); + } + bool compare_exchange_weak( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success ) volatile CDS_NOEXCEPT + { + return compare_exchange_weak( pOld, pNew, mo_success, CDS_ATOMIC::memory_order_relaxed ); + } + //@endcond + + private: + //@cond + static void before_store( T * pNew ) CDS_NOEXCEPT + { + if ( pNew ) + ++pNew->m_RC; + } + static void after_store( T * pOld, T * pNew ) CDS_NOEXCEPT + { + if ( pNew ) + pNew->m_bTrace.store( false, CDS_ATOMIC::memory_order_release ); + if ( pOld ) + --pOld->m_RC; + } + static void before_cas( T * p ) CDS_NOEXCEPT + { + if ( p ) { + ++p->m_RC; + p->m_bTrace.store( false, CDS_ATOMIC::memory_order_release ); + } + } + static void after_cas( bool bSuccess, T * pOld, T * pNew ) CDS_NOEXCEPT + { + if ( bSuccess ) { + if ( pOld ) + --pOld->m_RC; + } + else { + if ( pNew ) + --pNew->m_RC; + } + } + //@endcond + }; + + /// Atomic marked pointer + /** + @headerfile cds/gc/hrc.h + */ + template + class atomic_marked_ptr + { + //@cond + CDS_ATOMIC::atomic< MarkedPtr > m_a; + //@endcond + public: + /// Marked pointer type + typedef MarkedPtr marked_ptr; + + //@cond + atomic_marked_ptr() CDS_NOEXCEPT + : m_a( marked_ptr() ) + {} + + explicit CDS_CONSTEXPR atomic_marked_ptr( typename marked_ptr::value_type * p ) CDS_NOEXCEPT + : m_a( marked_ptr(p) ) + {} + + atomic_marked_ptr( typename marked_ptr::value_type * ptr, int nMask ) CDS_NOEXCEPT + : m_a( marked_ptr(ptr, nMask) ) + {} + + explicit atomic_marked_ptr( marked_ptr const& ptr ) CDS_NOEXCEPT + : m_a( ptr ) + {} + //@endcond + + + /// Read reference value + marked_ptr load(CDS_ATOMIC::memory_order order) const CDS_NOEXCEPT + { + return m_a.load(order); + } + + /// Store new value to reference + void store( marked_ptr pNew, CDS_ATOMIC::memory_order order ) CDS_NOEXCEPT + { + before_store( pNew.ptr() ); + marked_ptr pOld = m_a.exchange( pNew, order ); + after_store( pOld.ptr(), pNew.ptr() ); + } + + /// Store new value to reference + void store( typename marked_ptr::pointer_type pNew, CDS_ATOMIC::memory_order order ) CDS_NOEXCEPT + { + before_store( pNew ); + marked_ptr pOld = m_a.exchange( marked_ptr(pNew), order ); + after_store( pOld.ptr(), pNew ); + } + + /// Updates atomic reference from current value \p pOld to new value \p pNew (weak CAS) + /** + May be used when concurrent updates are possible + + \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type + */ + bool compare_exchange_weak( marked_ptr& pOld, marked_ptr pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) CDS_NOEXCEPT + { + before_cas( pNew.ptr() ); + bool bSuccess = m_a.compare_exchange_weak( pOld, pNew, mo_success, mo_fail ); + after_cas( bSuccess, pOld.ptr(), pNew.ptr() ); + return bSuccess; + } + //@cond + bool compare_exchange_weak( marked_ptr& pOld, marked_ptr pNew, CDS_ATOMIC::memory_order mo_success ) CDS_NOEXCEPT + { + before_cas( pNew.ptr() ); + bool bSuccess = m_a.compare_exchange_weak( pOld, pNew, mo_success ); + after_cas( bSuccess, pOld.ptr(), pNew.ptr() ); + return bSuccess; + } + //@endcond + + /// Updates atomic reference from current value \p pOld to new value \p pNew (strong CAS) + /** + May be used when concurrent updates are possible + + \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type + */ + bool 
compare_exchange_strong( marked_ptr& pOld, marked_ptr pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) CDS_NOEXCEPT + { + // protect pNew + before_cas( pNew.ptr() ); + bool bSuccess = m_a.compare_exchange_strong( pOld, pNew, mo_success, mo_fail ); + after_cas( bSuccess, pOld.ptr(), pNew.ptr() ); + return bSuccess; + } + //@cond + bool compare_exchange_strong( marked_ptr& pOld, marked_ptr pNew, CDS_ATOMIC::memory_order mo_success ) CDS_NOEXCEPT + { + before_cas( pNew.ptr() ); + bool bSuccess = m_a.compare_exchange_strong( pOld, pNew, mo_success ); + after_cas( bSuccess, pOld.ptr(), pNew.ptr() ); + return bSuccess; + } + //@endcond + + private: + //@cond + static void before_store( typename marked_ptr::pointer_type p ) CDS_NOEXCEPT + { + if ( p ) + ++p->m_RC; + } + static void after_store( typename marked_ptr::pointer_type pOld, typename marked_ptr::pointer_type pNew ) CDS_NOEXCEPT + { + if ( pNew ) + pNew->m_bTrace.store( false, CDS_ATOMIC::memory_order_release ); + if ( pOld ) + --pOld->m_RC; + } + static void before_cas( typename marked_ptr::pointer_type p ) CDS_NOEXCEPT + { + if ( p ) { + ++p->m_RC; + p->m_bTrace.store( false, CDS_ATOMIC::memory_order_release ); + } + } + static void after_cas( bool bSuccess, typename marked_ptr::pointer_type pOld, typename marked_ptr::pointer_type pNew ) CDS_NOEXCEPT + { + if ( bSuccess ) { + if ( pOld ) + --pOld->m_RC; + } + else { + if ( pNew ) + --pNew->m_RC; + } + } + //@endcond + }; + + /// HRC guard + /** + @headerfile cds/gc/hrc.h + This class is a wrapper for hrc::AutoHPGuard. + */ + class Guard: public hrc::AutoHPGuard + { + //@cond + typedef hrc::AutoHPGuard base_class; + //@endcond + + public: + /// Default constructor + Guard() ; // inline in hrc_impl.h + + /// Protects atomic pointer + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store it + to the HP slot repeatedly until the guard's value equals \p toGuard + */ + template + T * protect( atomic_ref const& toGuard ) + { + T * pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed); + T * pRet; + do { + pRet = assign( pCur ); + pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire); + } while ( pRet != pCur ); + return pCur; + } + + /// Protects a converted pointer of type \p atomic + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store result of \p f functor + to the HP slot repeatedly until the guard's value equals \p toGuard. + + The function is useful for intrusive containers when \p toGuard is a node pointer + that should be converted to a pointer to the value type before guarding. + The parameter \p f of type Func is a functor that makes this conversion: + \code + struct functor { + value_type * operator()( T * p ); + }; + \endcode + Really, the result of f( toGuard.load() ) is assigned to the hazard pointer. + */ + template + T * protect( atomic_ref const& toGuard, Func f ) + { + T * pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed); + T * pRet; + do { + pRet = pCur; + assign( f( pCur ) ); + pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire); + } while ( pRet != pCur ); + return pCur; + } + + /// Protects a atomic marked reference \p link + /** + Returns current value of \p link. 
+ + The function tries to load \p link and to store it + to the guard repeatedly until the guard's value equals \p link + */ + template + typename atomic_marked_ptr::marked_ptr protect( atomic_marked_ptr const& link ) + { + typename atomic_marked_ptr::marked_ptr p; + do { + assign( ( p = link.load(CDS_ATOMIC::memory_order_relaxed)).ptr() ); + } while ( p != link.load(CDS_ATOMIC::memory_order_acquire) ); + return p; + } + + /// Protects a atomic marked reference \p link + /** + Returns current value of \p link. + + The function tries to load \p link and to store it + to the guard repeatedly until the guard's value equals \p link + + The function is useful for intrusive containers when \p link is a node pointer + that should be converted to a pointer to the value type before guarding. + The parameter \p f of type Func is a functor that makes this conversion: + \code + struct functor { + value_type * operator()( T p ); + }; + \endcode + Really, the result of f( link.load() ) is assigned to the hazard pointer. + */ + template + typename atomic_marked_ptr::marked_ptr protect( atomic_marked_ptr const& link, Func f ) + { + typename atomic_marked_ptr::marked_ptr pCur; + do { + pCur = link.load(CDS_ATOMIC::memory_order_relaxed); + assign( f( pCur )); + } while ( pCur != link.load(CDS_ATOMIC::memory_order_acquire) ); + return pCur; + } + + /// Stores \p p to the guard + /** + The function equals to a simple assignment, no loop is performed. + Can be used for a pointer that cannot be changed concurrently. + */ + template + T * assign( T * p ) + { + return base_class::operator =(p); + } + + /// Stores marked pointer \p p to the guard + /** + The function equals to a simple assignment of p.ptr(), no loop is performed. + Can be used for a marked pointer that cannot be changed concurrently. + */ + template + T * assign( cds::details::marked_ptr p ) + { + return base_class::operator =( p.ptr() ); + } + + /// Copy from \p src guard to \p this guard + void copy( Guard const& src ) + { + assign( src.get_native() ); + } + + /// Clear value of the guard + void clear() + { + base_class::clear(); + } + + /// Get the value currently protected + template + T * get() const + { + return static_cast( get_native()); + } + + /// Get native hazard pointer stored + guarded_pointer get_native() const + { + return base_class::get(); + } + }; + + /// Array of guards + /** + @headerfile cds/gc/hrc.h + This class is a wrapper for AutoHPArray template. + Template parameter \p Limit defines the size of HP array. + */ + template + class GuardArray: public hrc::AutoHPArray + { + //@cond + typedef hrc::AutoHPArray base_class; + //@endcond + public: + /// Rebind array for other size \p OtherLimit + template + struct rebind { + typedef GuardArray other ; ///< rebinding result + }; + + public: + //@cond + GuardArray() ; // inline in hrc_impl.h + GuardArray( thread_gc_impl& threadGC ) + : base_class( threadGC ) + {} + //@endcond + + /// Protects an atomic reference \p link in slot \p nIndex + /** + Returns current value of \p link. + + The function tries to load \p pToGuard and to store it + to the slot \p nIndex repeatedly until the guard's value equals \p pToGuard + */ + template + T * protect( size_t nIndex, atomic_ref const& link ) + { + T * p; + do { + p = assign( nIndex, link.load(CDS_ATOMIC::memory_order_relaxed) ); + } while ( p != link.load(CDS_ATOMIC::memory_order_acquire) ); + return p; + } + + /// Protects a atomic marked reference \p link in slot \p nIndex + /** + Returns current value of \p link. 
+ + The function tries to load \p link and to store it + to the slot \p nIndex repeatedly until the guard's value equals \p link + */ + template + typename atomic_marked_ptr::marked_ptr protect( size_t nIndex, atomic_marked_ptr const& link ) + { + typename atomic_marked_ptr::marked_ptr p; + do { + assign( nIndex, ( p = link.load(CDS_ATOMIC::memory_order_relaxed)).ptr() ); + } while ( p != link.load(CDS_ATOMIC::memory_order_acquire) ); + return p; + } + + /// Protects a pointer of type \p atomic + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store it + to the slot \p nIndex repeatedly until the guard's value equals \p toGuard + + The function is useful for intrusive containers when \p toGuard is a node pointer + that should be converted to a pointer to the value type before guarding. + The parameter \p f of type Func is a functor that makes this conversion: + \code + struct functor { + value_type * operator()( T * p ); + }; + \endcode + Really, the result of f( toGuard.load() ) is assigned to the hazard pointer. + */ + template + T * protect(size_t nIndex, atomic_ref const& toGuard, Func f ) + { + T * pRet; + do { + assign( nIndex, f( pRet = toGuard.load(CDS_ATOMIC::memory_order_relaxed) )); + } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_acquire)); + + return pRet; + } + + /// Protects a atomic marked reference \p link in slot \p nIndex + /** + Returns current value of \p link. + + The function tries to load \p link and to store it + to the slot \p nIndex repeatedly until the guard's value equals \p link + + The function is useful for intrusive containers when \p link is a node pointer + that should be converted to a pointer to the value type before guarding. + The parameter \p f of type Func is a functor that makes this conversion: + \code + struct functor { + value_type * operator()( T p ); + }; + \endcode + Really, the result of f( link.load() ) is assigned to the hazard pointer. + */ + template + typename atomic_marked_ptr::marked_ptr protect( size_t nIndex, atomic_marked_ptr const& link, Func f ) + { + typename atomic_marked_ptr::marked_ptr p; + do { + p = link.load(CDS_ATOMIC::memory_order_relaxed); + assign( nIndex, f( p ) ); + } while ( p != link.load(CDS_ATOMIC::memory_order_acquire) ); + return p; + } + + /// Store \p to the slot \p nIndex + /** + The function equals to a simple assignment, no loop is performed. + */ + template + T * assign( size_t nIndex, T * p ) + { + base_class::set(nIndex, p); + return p; + } + + /// Store marked pointer \p p to the guard + /** + The function equals to a simple assignment of p.ptr(), no loop is performed. + Can be used for a marked pointer that cannot be changed concurrently. 
+ */ + template + T * assign( size_t nIndex, cds::details::marked_ptr p ) + { + return base_class::set( nIndex, p.ptr() ); + } + + /// Copy guarded value from \p src guard to slot at index \p nIndex + void copy( size_t nIndex, Guard const& src ) + { + assign( nIndex, src.get_native() ); + } + + /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex + void copy( size_t nDestIndex, size_t nSrcIndex ) + { + assign( nDestIndex, get_native( nSrcIndex )); + } + + /// Clear value of the slot \p nIndex + void clear( size_t nIndex) + { + base_class::clear( nIndex ); + } + + /// Get current value of slot \p nIndex + template + T * get( size_t nIndex) const + { + return static_cast( get_native( nIndex ) ); + } + + /// Get native hazard pointer stored + guarded_pointer get_native( size_t nIndex ) const + { + return base_class::operator[](nIndex).get(); + } + + /// Capacity of the guard array + static CDS_CONSTEXPR size_t capacity() + { + return Limit; + } + }; + + public: + /// Initializes hrc::GarbageCollector singleton + /** + The constructor calls hrc::GarbageCollector::Construct with passed parameters. + See hrc::GarbageCollector::Construct for explanation of parameters meaning. + */ + HRC( + size_t nHazardPtrCount = 0, ///< number of hazard pointers + size_t nMaxThreadCount = 0, ///< max threads count + size_t nMaxNodeLinkCount = 0, ///< max number of links a @ref hrc::ContainerNode can contain + size_t nMaxTransientLinks = 0 ///< max number of links in live nodes that may transiently point to a deleted node + ) + { + hrc::GarbageCollector::Construct( + nHazardPtrCount, + nMaxThreadCount, + nMaxNodeLinkCount, + nMaxTransientLinks + ); + } + + /// Terminates hrc::GarbageCollector singleton + /** + The destructor calls \code hrc::GarbageCollector::Destruct() \endcode + */ + ~HRC() + { + hrc::GarbageCollector::Destruct(); + } + + /// Checks if count of hazard pointer is no less than \p nCountNeeded + /** + If \p bRaiseException is \p true (that is the default), the function raises an exception gc::too_few_hazard_pointers + if \p nCountNeeded is more than the count of hazard pointer per thread. + */ + static bool check_available_guards( size_t nCountNeeded, bool bRaiseException = true ) + { + if ( hrc::GarbageCollector::instance().getHazardPointerCount() < nCountNeeded ) { + if ( bRaiseException ) + throw cds::gc::too_few_hazard_pointers(); + return false; + } + return true; + } + + /// Retire pointer \p p with function \p pFunc + /** + The function places pointer \p p to array of pointers ready for removing. + (so called retired pointer array). The pointer can be safely removed when no guarded pointer points to it. + Deleting the pointer is the function \p pFunc call. + */ + template + static void retire( T * p, void (* pFunc)(T *) ) ; // inline in hrc_impl.h + + /// Retire pointer \p p with functor of type \p Disposer + /** + The function places pointer \p p to array of pointers ready for removing. + (so called retired pointer array). The pointer can be safely removed when no guard points to it. + + See gc::HP::retire for \p Disposer requirements. + */ + template + static void retire( T * p ) ; // inline in hrc_impl.h + + /// Checks if HRC GC is constructed and may be used + static bool isUsed() + { + return hrc::GarbageCollector::isUsed(); + } + + /// Forced GC cycle call for current thread + /** + Usually, this function should not be called directly. 
+ */ + static void scan() ; // inline in hrc_impl.h + + /// Synonym for \ref scan() + static void force_dispose() + { + scan(); + } + }; +}} // namespace cds::gc + +#endif // #ifndef __CDS_GC_HRC_DECL_H diff --git a/cds/gc/hrc_impl.h b/cds/gc/hrc_impl.h new file mode 100644 index 00000000..db45a5bc --- /dev/null +++ b/cds/gc/hrc_impl.h @@ -0,0 +1,57 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_HRC_IMPL_H +#define __CDS_GC_HRC_IMPL_H + +#include +#include + +//@cond +namespace cds { namespace gc { + + inline HRC::thread_gc::thread_gc( + bool bPersistent + ) + : m_bPersistent( bPersistent ) + { + if ( !cds::threading::Manager::isThreadAttached() ) + cds::threading::Manager::attachThread(); + } + + inline HRC::thread_gc::~thread_gc() + { + if ( !m_bPersistent ) + cds::threading::Manager::detachThread(); + } + + inline HRC::Guard::Guard() + : Guard::base_class( cds::threading::getGC() ) + {} + + template + inline HRC::GuardArray::GuardArray() + : GuardArray::base_class( threading::getGC() ) + {} + + template + inline void HRC::retire( T * p, void (* pFunc)(T *) ) + { + cds::threading::getGC().retireNode( p, reinterpret_cast(pFunc) ); + } + + template + inline void HRC::retire( T * p ) + { + cds::threading::getGC().retireNode( p, reinterpret_cast( cds::details::static_functor::call )); + } + + inline void HRC::scan() + { + cds::threading::getGC().scan(); + } + + +}} // namespace cds::gc +//@endcond + +#endif // #ifndef __CDS_GC_HRC_IMPL_H diff --git a/cds/gc/hzp/details/hp_alloc.h b/cds/gc/hzp/details/hp_alloc.h new file mode 100644 index 00000000..b6e14201 --- /dev/null +++ b/cds/gc/hzp/details/hp_alloc.h @@ -0,0 +1,322 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_HZP_DETAILS_HP_ALLOC_H +#define __CDS_GC_HZP_DETAILS_HP_ALLOC_H + +#include +#include +#include +#include + +//@cond +namespace cds { + namespace gc { namespace hzp { + /// Hazard Pointer schema implementation details + namespace details { + + /// Hazard pointer guard + /** + It is unsafe to use this class directly. + Instead, the AutoHPGuard class should be used. + + Template parameter: + \li HazardPointer - type of hazard pointer. It is \ref hazard_pointer for Michael's Hazard Pointer reclamation schema + */ + template + class HPGuardT: protected CDS_ATOMIC::atomic + { + public: + typedef HazardPointer hazard_ptr ; ///< Hazard pointer type + private: + //@cond + typedef CDS_ATOMIC::atomic base_class; + //@endcond + + protected: + //@cond + template friend class HPAllocator; + //@endcond + + public: + HPGuardT() CDS_NOEXCEPT + : base_class( null_ptr() ) + {} + ~HPGuardT() CDS_NOEXCEPT + {} + + /// Sets HP value. Guards pointer \p p from reclamation. + /** + Storing has release semantics. + */ + template + T * operator =( T * p ) CDS_NOEXCEPT + { + // We use atomic store with explicit memory order because other threads may read this hazard pointer concurrently + base_class::store( reinterpret_cast(p), CDS_ATOMIC::memory_order_release ); + return p; + } + + /// Returns current value of hazard pointer + /** + Loading has acquire semantics + */ + operator hazard_ptr() const CDS_NOEXCEPT + { + return get(); + } + + /// Returns current value of hazard pointer + /** + Loading has acquire semantics + */ + hazard_ptr get() const CDS_NOEXCEPT + { + return base_class::load( CDS_ATOMIC::memory_order_acquire ); + } + + /// Clears HP + /** + Clearing has relaxed semantics. 
+ */ + void clear() CDS_NOEXCEPT + { + // memory order is not necessary here + base_class::store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + //CDS_COMPILER_RW_BARRIER; + } + }; + + /// Specialization of HPGuardT for hazard_pointer type + typedef HPGuardT HPGuard; + + /// Array of hazard pointers. + /** + Array of hazard-pointer. Placing a pointer into this array guards the pointer against reclamation. + Template parameter \p Count defines the size of hazard pointer array. \p Count parameter should not exceed + GarbageCollector::getHazardPointerCount(). + + It is unsafe to use this class directly. Instead, the AutoHPArray should be used. + + While creating the object of HPArray class an array of size \p Count of hazard pointers is reserved by + the HP Manager of current thread. The object's destructor cleans all of reserved hazard pointer and + returns reserved HP to the HP pool of ThreadGC. + + Usually, it is not necessary to create an object of this class. The object of class ThreadGC contains + the HPArray object and implements interface for HP setting and freeing. + + Template parameter: + \li HazardPointer - type of hazard pointer. It is hazard_pointer usually + \li Count - capacity of array + + */ + template + class HPArrayT + { + public: + typedef HazardPointer hazard_ptr_type ; ///< Hazard pointer type + typedef HPGuardT atomic_hazard_ptr ; ///< Element type of the array + static const size_t c_nCapacity = Count ; ///< Capacity of the array + + private: + //@cond + atomic_hazard_ptr * m_arr ; ///< Hazard pointer array of size = \p Count + template friend class HPAllocator; + //@endcond + + public: + /// Constructs uninitialized array. + HPArrayT() CDS_NOEXCEPT + {} + + /// Destructs object + ~HPArrayT() CDS_NOEXCEPT + {} + + /// Returns max count of hazard pointer for this array + CDS_CONSTEXPR size_t capacity() const + { + return c_nCapacity; + } + + /// Set hazard pointer \p nIndex. 0 <= \p nIndex < \p Count + void set( size_t nIndex, hazard_ptr_type hzPtr ) CDS_NOEXCEPT + { + assert( nIndex < capacity() ); + m_arr[nIndex] = hzPtr; + } + + /// Returns reference to hazard pointer of index \p nIndex (0 <= \p nIndex < \p Count) + atomic_hazard_ptr& operator []( size_t nIndex ) CDS_NOEXCEPT + { + assert( nIndex < capacity() ); + return m_arr[nIndex]; + } + + /// Returns reference to hazard pointer of index \p nIndex (0 <= \p nIndex < \p Count) [const version] + atomic_hazard_ptr& operator []( size_t nIndex ) const CDS_NOEXCEPT + { + assert( nIndex < capacity() ); + return m_arr[nIndex]; + } + + /// Clears (sets to NULL) hazard pointer \p nIndex + void clear( size_t nIndex ) CDS_NOEXCEPT + { + assert( nIndex < capacity() ); + m_arr[ nIndex ].clear(); + } + }; + + /// Specialization of HPArrayT class for hazard_pointer type +#ifdef CDS_CXX11_TEMPLATE_ALIAS_SUPPORT + template using HPArray = HPArrayT; +#else + template + class HPArray: public HPArrayT + {}; +#endif + + /// Allocator of hazard pointers for the thread + /** + The hazard pointer array is the free-list of unused hazard pointer for the thread. + The array is managed as a stack. + The max size (capacity) of array is defined at ctor time and cannot be changed during object's lifetime + + Each allocator object is thread-private. + + Template parameters: + \li HazardPointer - type of hazard pointer (hazard_pointer usually) + \li Allocator - memory allocator class, default is \ref CDS_DEFAULT_ALLOCATOR + + This helper class should not be used directly. 
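+
+            A sketch of how the owning thread typically drives this allocator (illustrative only;
+            it mirrors ThreadGC::allocGuard / ThreadGC::freeGuard defined in hzp.h;
+            \p hpAlloc and \p pNode are hypothetical names):
+            \code
+            // hpAlloc is the thread-private HPAllocator<hazard_pointer> instance
+            HPGuard& guard = hpAlloc.alloc();   // pop a free slot from the stack
+            guard = pNode;                      // publish the hazard pointer (release store)
+            // ... pNode may be dereferenced safely while the guard holds it ...
+            hpAlloc.free( guard );              // clear the slot and push it back
+            \endcode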
+ */ + template < typename HazardPointer, class Allocator = CDS_DEFAULT_ALLOCATOR > + class HPAllocator + { + public: + typedef HazardPointer hazard_ptr_type ; ///< type of hazard pointer + typedef HPGuardT atomic_hazard_ptr ; ///< Atomic hazard pointer type + typedef Allocator allocator_type ; ///< allocator type + + private: + //@cond + typedef cds::details::Allocator< atomic_hazard_ptr, allocator_type > allocator_impl; + + atomic_hazard_ptr * m_arrHazardPtr ; ///< Array of hazard pointers + size_t m_nTop ; ///< The top of stack + const size_t m_nCapacity ; ///< Array capacity + + //@endcond + + public: + /// Default ctor + explicit HPAllocator( + size_t nCapacity ///< max count of hazard pointer per thread + ) + : m_arrHazardPtr( alloc_array( nCapacity ) ) + , m_nCapacity( nCapacity ) + { + make_free(); + } + + /// Dtor + ~HPAllocator() + { + allocator_impl().Delete( m_arrHazardPtr, capacity() ); + } + + /// Get capacity of array + size_t capacity() const CDS_NOEXCEPT + { + return m_nCapacity; + } + + /// Get size of array. The size is equal to the capacity of array + size_t size() const CDS_NOEXCEPT + { + return capacity(); + } + + /// Checks if all items are allocated + bool isFull() const CDS_NOEXCEPT + { + return m_nTop == 0; + } + + /// Allocates hazard pointer + atomic_hazard_ptr& alloc() CDS_NOEXCEPT + { + assert( m_nTop > 0 ); + --m_nTop; + return m_arrHazardPtr[m_nTop]; + } + + /// Frees previously allocated hazard pointer + void free( atomic_hazard_ptr& hp ) CDS_NOEXCEPT + { + assert( m_nTop < capacity() ); + hp.clear(); + ++m_nTop; + CDS_COMPILER_RW_BARRIER ; // ??? + } + + /// Allocates hazard pointers array + /** + Allocates \p Count hazard pointers from array \p m_arrHazardPtr + Returns initialized object \p arr + */ + template + void alloc( HPArrayT& arr ) CDS_NOEXCEPT + { + assert( m_nTop >= Count ); + m_nTop -= Count; + arr.m_arr = m_arrHazardPtr + m_nTop; + } + + /// Frees hazard pointer array + /** + Frees the array of hazard pointers allocated by previous call \p this->alloc. 
+ */ + template + void free( const HPArrayT& arr ) CDS_NOEXCEPT + { + assert( m_nTop + Count <= capacity()); + for ( size_t i = m_nTop; i < m_nTop + Count; ++i ) + m_arrHazardPtr[ i ].clear(); + m_nTop += Count; + } + + /// Makes all HP free + void clear() CDS_NOEXCEPT + { + make_free(); + } + + /// Returns to i-th hazard pointer + atomic_hazard_ptr& operator []( size_t i ) CDS_NOEXCEPT + { + assert( i < capacity() ); + return m_arrHazardPtr[i]; + } + + private: + //@cond + void make_free() CDS_NOEXCEPT + { + for ( size_t i = 0; i < capacity(); ++i ) + m_arrHazardPtr[ i ].clear(); + m_nTop = capacity(); + } + + atomic_hazard_ptr * alloc_array( size_t nCapacity ) + { + return allocator_impl().NewArray( nCapacity ); + } + //@endcond + }; + + }}} // namespace gc::hzp::details +} // namespace cds +//@endcond + +#endif // #ifndef __CDS_GC_HZP_DETAILS_HP_ALLOC_H diff --git a/cds/gc/hzp/details/hp_fwd.h b/cds/gc/hzp/details/hp_fwd.h new file mode 100644 index 00000000..3183f47f --- /dev/null +++ b/cds/gc/hzp/details/hp_fwd.h @@ -0,0 +1,15 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_HZP_DETAILS_HP_FWD_H +#define __CDS_GC_HZP_DETAILS_HP_FWD_H + +namespace cds { + namespace gc { namespace hzp { + + // forward declarations + class GarbageCollector; + class ThreadGC; + } } +} + +#endif // #ifndef __CDS_GC_HZP_DETAILS_HP_FWD_H diff --git a/cds/gc/hzp/details/hp_inline.h b/cds/gc/hzp/details/hp_inline.h new file mode 100644 index 00000000..dbb561c4 --- /dev/null +++ b/cds/gc/hzp/details/hp_inline.h @@ -0,0 +1,26 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_HZP_DETAILS_HP_INLINE_H +#define __CDS_GC_HZP_DETAILS_HP_INLINE_H + +namespace cds { + namespace gc{ namespace hzp { namespace details { + + /************************************************************************/ + /* INLINES */ + /************************************************************************/ + inline retired_vector::retired_vector( const cds::gc::hzp::GarbageCollector& HzpMgr ) + : m_arr( HzpMgr.getMaxRetiredPtrCount() ), + m_nSize(0) + {} + + inline HPRec::HPRec( const cds::gc::hzp::GarbageCollector& HzpMgr ) + : m_hzp( HzpMgr.getHazardPointerCount() ), + m_arrRetired( HzpMgr ) + {} + + } } } // namespace gc::hzp::details +} // namespace cds + + +#endif // #ifndef __CDS_GC_HZP_DETAILS_HP_INLINE_H diff --git a/cds/gc/hzp/details/hp_retired.h b/cds/gc/hzp/details/hp_retired.h new file mode 100644 index 00000000..fc32ec6a --- /dev/null +++ b/cds/gc/hzp/details/hp_retired.h @@ -0,0 +1,86 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_HZP_DETAILS_HP_RETIRED_H +#define __CDS_GC_HZP_DETAILS_HP_RETIRED_H + +#include +#include + +#include + +namespace cds { + namespace gc{ namespace hzp { namespace details { + + /// Retired pointer + typedef cds::gc::details::retired_ptr retired_ptr; + + /// Array of retired pointers + /** + The vector of retired pointer ready to delete. + + The Hazard Pointer schema is build on thread-static arrays. For each HP-enabled thread the HP manager allocates + array of retired pointers. The array belongs to the thread: owner thread writes to the array, other threads + just read it. 
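+
+            A sketch of the typical owner-thread usage (illustrative only; it mirrors
+            ThreadGC::retirePtr defined in hzp.h; \p pRec, \p pObj and \p pFreeFunc are hypothetical names):
+            \code
+            retired_vector& arr = pRec->m_arrRetired;       // pRec is the thread's HPRec
+            arr.push( retired_ptr( pObj, pFreeFunc ) );     // defer reclamation of pObj
+            if ( arr.isFull() ) {
+                // the array is full - run a Scan pass to free everything that is not guarded
+                cds::gc::hzp::GarbageCollector::instance().Scan( pRec );
+            }
+            \endcode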
+ */ + class retired_vector { + /// Underlying vector implementation + typedef cds::details::bounded_array retired_vector_impl; + + retired_vector_impl m_arr ; ///< the array of retired pointers + size_t m_nSize ; ///< Current size of \p m_arr + + public: + /// Iterator + typedef retired_vector_impl::iterator iterator; + + /// Constructor + retired_vector( const cds::gc::hzp::GarbageCollector& HzpMgr ) ; // inline + ~retired_vector() + {} + + /// Vector capacity. + /** + The capacity is constant for any thread. It is defined by cds::gc::hzp::GarbageCollector. + */ + size_t capacity() const { return m_arr.capacity(); } + + /// Current vector size (count of retired pointers in the vector) + size_t size() const { return m_nSize; } + + /// Set vector size. Uses internally + void size( size_t nSize ) + { + assert( nSize <= capacity() ); + m_nSize = nSize; + } + + /// Pushes retired pointer to the vector + void push( const retired_ptr& p ) + { + assert( m_nSize < capacity() ); + m_arr[ m_nSize ] = p; + ++m_nSize; + } + + /// Checks if the vector is full (size() == capacity() ) + bool isFull() const + { + return m_nSize >= capacity(); + } + + /// Begin iterator + iterator begin() { return m_arr.begin(); } + /// End iterator + iterator end() { return m_arr.begin() + m_nSize ; } + + /// Clears the vector. After clearing, size() == 0 + void clear() + { + m_nSize = 0; + } + }; + + } } } // namespace gc::hzp::details +} // namespace cds + +#endif // #ifndef __CDS_GC_HZP_DETAILS_HP_RETIRED_H diff --git a/cds/gc/hzp/details/hp_type.h b/cds/gc/hzp/details/hp_type.h new file mode 100644 index 00000000..7ff9ab35 --- /dev/null +++ b/cds/gc/hzp/details/hp_type.h @@ -0,0 +1,23 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_HZP_DETAILS_HP_TYPE_H +#define __CDS_GC_HZP_DETAILS_HP_TYPE_H + +#include + +namespace cds { + namespace gc { + namespace hzp { + + /// Hazard pointer + typedef void * hazard_pointer; + + /// Pointer to function to free (destruct and deallocate) retired pointer of specific type + typedef cds::gc::details::free_retired_ptr_func free_retired_ptr_func; + } + } +} + +#endif // #ifndef __CDS_GC_HZP_DETAILS_HP_TYPE_H + + diff --git a/cds/gc/hzp/hzp.h b/cds/gc/hzp/hzp.h new file mode 100644 index 00000000..a19c5447 --- /dev/null +++ b/cds/gc/hzp/hzp.h @@ -0,0 +1,656 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_HZP_HZP_H +#define __CDS_GC_HZP_HZP_H + +#include +#include +#include + +#include +#include +#include + +#include +#include + +#if CDS_COMPILER == CDS_COMPILER_MSVC +# pragma warning(push) + // warning C4251: 'cds::gc::hzp::GarbageCollector::m_pListHead' : class 'cds::cxx11_atomics::atomic' + // needs to have dll-interface to be used by clients of class 'cds::gc::hzp::GarbageCollector' +# pragma warning(disable: 4251) +#endif + +/* + Editions: + 2007.12.24 khizmax Add statistics and CDS_GATHER_HAZARDPTR_STAT macro + 2008.03.06 khizmax Refactoring: implementation of HazardPtrMgr is moved to hazardptr.cpp + 2008.03.08 khizmax Remove HazardPtrMgr singleton. Now you must initialize/destroy HazardPtrMgr calling + HazardPtrMgr::Construct / HazardPtrMgr::Destruct before use (usually in main() function). + 2008.12.06 khizmax Refactoring. Changes class name, namespace hierarchy, all helper defs have been moved to details namespace + 2010.01.27 khizmax Introducing memory order constraint +*/ + +namespace cds { + /** + @page cds_garbage_collectors_comparison GC comparison + @ingroup cds_garbage_collector + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+        <table>
+        <tr><th>Feature</th><th>%cds::gc::HP</th><th>%cds::gc::HRC</th><th>%cds::gc::PTB</th></tr>
+        <tr><td>Implementation quality</td><td>stable</td><td>unstable</td><td>stable</td></tr>
+        <tr><td>Performance rank (1 - slowest, 5 - fastest)</td><td>5</td><td>1</td><td>4</td></tr>
+        <tr><td>Max number of guarded (hazard) pointers per thread</td><td>limited (specified in GC object ctor)</td><td>limited (specified in GC object ctor)</td><td>unlimited (dynamically allocated when needed)</td></tr>
+        <tr><td>Max number of retired pointers<sup>1</sup></td><td>bounded</td><td>bounded</td><td>bounded</td></tr>
+        <tr><td>Array of retired pointers</td><td>preallocated for each thread, limited in size</td><td>preallocated for each thread, limited in size</td><td>global for the entire process, unlimited (dynamically allocated when needed)</td></tr>
+        <tr><td>Support direct pointer to item of lock-free container (useful for iterators)</td><td>not supported</td><td>potentially supported (not implemented)</td><td>not supported</td></tr>
+        </table>
+ + 1Unbounded count of retired pointer means a possibility of memory exhaustion. + */ + + /// Different safe memory reclamation schemas (garbage collectors) + /** @ingroup cds_garbage_collector + + This namespace specifies different safe memory reclamation (SMR) algorithms. + See \ref cds_garbage_collector "Garbage collectors" + */ + namespace gc { + + /// Michael's Hazard Pointers reclamation schema + /** + \par Sources: + - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-freeobjects using atomic reads and writes" + - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects" + - [2004] Andrei Alexandrescy, Maged Michael "Lock-free Data Structures with Hazard Pointers" + + + The cds::gc::hzp namespace and its members are internal representation of Hazard Pointer GC and should not be used directly. + Use cds::gc::HP class in your code. + + Hazard Pointer garbage collector is a singleton. The main user-level part of Hazard Pointer schema is + GC class and its nested classes. Before use any HP-related class you must initialize HP garbage collector + by contructing cds::gc::HP object in beginning of your main(). + See cds::gc::HP class for explanation. + */ + namespace hzp { + + namespace details { + /// Hazard pointer record of the thread + /** + The structure of type "single writer - multiple reader": only the owner thread may write to this structure + other threads have read-only access. + */ + struct HPRec { + HPAllocator m_hzp ; ///< array of hazard pointers. Implicit \ref CDS_DEFAULT_ALLOCATOR dependency + retired_vector m_arrRetired ; ///< Retired pointer array + + /// Ctor + HPRec( const cds::gc::hzp::GarbageCollector& HzpMgr ) ; // inline + ~HPRec() + {} + + /// Clears all hazard pointers + void clear() + { + m_hzp.clear(); + } + }; + } // namespace details + + /// GarbageCollector::Scan phase strategy + /** + See GarbageCollector::Scan for explanation + */ + enum scan_type { + classic, ///< classic scan as described in Michael's works (see GarbageCollector::classic_scan) + inplace ///< inplace scan without allocation (see GarbageCollector::inplace_scan) + }; + + /// Hazard Pointer singleton + /** + Safe memory reclamation schema by Michael "Hazard Pointers" + + \par Sources: + \li [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-freeobjects using atomic reads and writes" + \li [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects" + \li [2004] Andrei Alexandrescy, Maged Michael "Lock-free Data Structures with Hazard Pointers" + + */ + class CDS_EXPORT_API GarbageCollector + { + public: + typedef cds::atomicity::event_counter event_counter ; ///< event counter type + + /// Internal GC statistics + struct InternalState { + size_t nHPCount ; ///< HP count per thread (const) + size_t nMaxThreadCount ; ///< Max thread count (const) + size_t nMaxRetiredPtrCount ; ///< Max retired pointer count per thread (const) + size_t nHPRecSize ; ///< Size of HP record, bytes (const) + + size_t nHPRecAllocated ; ///< Count of HP record allocations + size_t nHPRecUsed ; ///< Count of HP record used + size_t nTotalRetiredPtrCount ; ///< Current total count of retired pointers + size_t nRetiredPtrInFreeHPRecs ; ///< Count of retired pointer in free (unused) HP records + + event_counter::value_type evcAllocHPRec ; ///< Count of HPRec allocations + event_counter::value_type evcRetireHPRec ; ///< Count of HPRec retire events + event_counter::value_type evcAllocNewHPRec; ///< Count of new HPRec allocations from 
heap + event_counter::value_type evcDeleteHPRec ; ///< Count of HPRec deletions + + event_counter::value_type evcScanCall ; ///< Count of Scan calling + event_counter::value_type evcHelpScanCall ; ///< Count of HelpScan calling + event_counter::value_type evcScanFromHelpScan;///< Count of Scan calls from HelpScan + + event_counter::value_type evcDeletedNode ; ///< Count of deleting of retired objects + event_counter::value_type evcDeferredNode ; ///< Count of objects that cannot be deleted in Scan phase because of a hazard_pointer guards it + }; + + /// No GarbageCollector object is created + CDS_DECLARE_EXCEPTION( HZPManagerEmpty, "Global Hazard Pointer GarbageCollector is NULL" ); + + /// Not enough required Hazard Pointer count + CDS_DECLARE_EXCEPTION( HZPTooMany, "Not enough required Hazard Pointer count" ); + + private: + /// Internal GC statistics + struct Statistics { + event_counter m_AllocHPRec ; ///< Count of HPRec allocations + event_counter m_RetireHPRec ; ///< Count of HPRec retire events + event_counter m_AllocNewHPRec ; ///< Count of new HPRec allocations from heap + event_counter m_DeleteHPRec ; ///< Count of HPRec deletions + + event_counter m_ScanCallCount ; ///< Count of Scan calling + event_counter m_HelpScanCallCount ; ///< Count of HelpScan calling + event_counter m_CallScanFromHelpScan ; ///< Count of Scan calls from HelpScan + + event_counter m_DeletedNode ; ///< Count of retired objects deleting + event_counter m_DeferredNode ; ///< Count of objects that cannot be deleted in Scan phase because of a hazard_pointer guards it + }; + + /// Internal list of cds::gc::hzp::details::HPRec + struct hplist_node: public details::HPRec + { + hplist_node * m_pNextNode ; ///< next hazard ptr record in list + CDS_ATOMIC::atomic m_idOwner ; ///< Owner thread id; 0 - the record is free (not owned) + CDS_ATOMIC::atomic m_bFree ; ///< true if record if free (not owned) + + //@cond + hplist_node( const GarbageCollector& HzpMgr ) + : HPRec( HzpMgr ), + m_pNextNode(NULL), + m_idOwner( OS::nullThreadId() ), + m_bFree( true ) + {} + + ~hplist_node() + { + assert( m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == OS::nullThreadId() ); + assert( m_bFree.load(CDS_ATOMIC::memory_order_relaxed) ); + } + //@endcond + }; + + CDS_ATOMIC::atomic m_pListHead ; ///< Head of GC list + + static GarbageCollector * m_pHZPManager ; ///< GC instance pointer + + Statistics m_Stat ; ///< Internal statistics + bool m_bStatEnabled ; ///< true - statistics enabled + + const size_t m_nHazardPointerCount ; ///< max count of thread's hazard pointer + const size_t m_nMaxThreadCount ; ///< max count of thread + const size_t m_nMaxRetiredPtrCount ; ///< max count of retired ptr per thread + scan_type m_nScanType ; ///< scan type (see \ref scan_type enum) + + + private: + /// Ctor + GarbageCollector( + size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread + size_t nMaxThreadCount = 0, ///< Max count of thread + size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects + scan_type nScanType = inplace ///< Scan type (see \ref scan_type enum) + ); + + /// Dtor + ~GarbageCollector(); + + /// Allocate new HP record + hplist_node * NewHPRec(); + + /// Permanently deletes HPrecord \p pNode + /** + Caveat: for performance reason this function is defined as inline and cannot be called directly + */ + void DeleteHPRec( hplist_node * pNode ); + + /// Permanently deletes retired pointer \p p + /** + Caveat: for performance reason this function is defined as inline and cannot be called directly + 
*/ + void DeletePtr( details::retired_ptr& p ); + + //@cond + void detachAllThread(); + //@endcond + + public: + /// Creates GarbageCollector singleton + /** + GC is the singleton. If GC instance is not exist then the function creates the instance. + Otherwise it does nothing. + + The Michael's HP reclamation schema depends of three parameters: + + \p nHazardPtrCount - HP pointer count per thread. Usually it is small number (2-4) depending from + the data structure algorithms. By default, if \p nHazardPtrCount = 0, + the function uses maximum of HP count for CDS library. + + \p nMaxThreadCount - max count of thread with using HP GC in your application. Default is 100. + + \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than + \p nHazardPtrCount * \p nMaxThreadCount. + Default is 2 * \p nHazardPtrCount * \p nMaxThreadCount. + */ + static void CDS_STDCALL Construct( + size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread + size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application + size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread + scan_type nScanType = inplace ///< Scan type (see \ref scan_type enum) + ); + + /// Destroys global instance of GarbageCollector + /** + The parameter \p bDetachAll should be used carefully: if its value is \p true, + then the destroying GC automatically detaches all attached threads. This feature + can be useful when you have no control over the thread termination, for example, + when \p libcds is injected into existing external thread. + */ + static void CDS_STDCALL Destruct( + bool bDetachAll = false ///< Detach all threads + ); + + /// Returns pointer to GarbageCollector instance + static GarbageCollector& instance() + { + if ( m_pHZPManager == NULL ) + throw HZPManagerEmpty(); + return *m_pHZPManager; + } + + /// Checks if global GC object is constructed and may be used + static bool isUsed() + { + return m_pHZPManager != NULL; + } + + /// Returns max Hazard Pointer count defined in construction time + size_t getHazardPointerCount() const { return m_nHazardPointerCount; } + + /// Returns max thread count defined in construction time + size_t getMaxThreadCount() const { return m_nMaxThreadCount; } + + /// Returns max size of retired objects array. It is defined in construction time + size_t getMaxRetiredPtrCount() const { return m_nMaxRetiredPtrCount; } + + // Internal statistics + + /// Get internal statistics + InternalState& getInternalState(InternalState& stat) const; + + /// Checks if internal statistics enabled + bool isStatisticsEnabled() const { return m_bStatEnabled; } + + /// Enables/disables internal statistics + bool enableStatistics( bool bEnable ) + { + bool bEnabled = m_bStatEnabled; + m_bStatEnabled = bEnable; + return bEnabled; + } + + /// Checks that required hazard pointer count \p nRequiredCount is less or equal then max hazard pointer count + /** + If \p nRequiredCount > getHazardPointerCount() then the exception HZPTooMany is thrown + */ + static void checkHPCount( unsigned int nRequiredCount ) + { + if ( instance().getHazardPointerCount() < nRequiredCount ) + throw HZPTooMany(); + } + + /// Get current scan strategy + scan_type getScanType() const + { + return m_nScanType; + } + + /// Set current scan strategy + /** @anchor hzp_gc_setScanType + Scan strategy changing is allowed on the fly. 
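+
+            For example (illustrative):
+            \code
+            // switch the process-wide HP GC to the allocation-free in-place scan algorithm
+            cds::gc::hzp::GarbageCollector::instance().setScanType( cds::gc::hzp::inplace );
+            \endcode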
+ */ + void setScanType( + scan_type nScanType ///< new scan strategy + ) + { + m_nScanType = nScanType; + } + + public: // Internals for threads + + /// Allocates Hazard Pointer GC record. For internal use only + details::HPRec * AllocateHPRec(); + + /// Free HP record. For internal use only + void RetireHPRec( details::HPRec * pRec ); + + /// The main garbage collecting function + /** + This function is called internally by ThreadGC object when upper bound of thread's list of reclaimed pointers + is reached. + + There are the following scan algorithm: + - \ref hzp_gc_classic_scan "classic_scan" allocates memory for internal use + - \ref hzp_gc_inplace_scan "inplace_scan" does not allocate any memory + + Use \ref hzp_gc_setScanType "setScanType" member function to setup appropriate scan algorithm. + */ + void Scan( details::HPRec * pRec ) + { + switch ( m_nScanType ) { + case inplace: + inplace_scan( pRec ); + break; + default: + assert(false) ; // Forgotten something?.. + case classic: + classic_scan( pRec ); + break; + } + } + + /// Helper scan routine + /** + The function guarantees that every node that is eligible for reuse is eventually freed, barring + thread failures. To do so, after executing Scan, a thread executes a HelpScan, + where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers + to thread's list of reclaimed pointers. + + The function is called internally by Scan. + */ + void HelpScan( details::HPRec * pThis ); + + protected: + /// Classic scan algorithm + /** @anchor hzp_gc_classic_scan + Classical scan algorithm as described in Michael's paper. + + A scan includes four stages. The first stage involves scanning the array HP for non-null values. + Whenever a non-null value is encountered, it is inserted in a local list of currently protected pointer. + Only stage 1 accesses shared variables. The following stages operate only on private variables. + + The second stage of a scan involves sorting local list of protected pointers to allow + binary search in the third stage. + + The third stage of a scan involves checking each reclaimed node + against the pointers in local list of protected pointers. If the binary search yields + no match, the node is freed. Otherwise, it cannot be deleted now and must kept in thread's list + of reclaimed pointers. + + The forth stage prepares new thread's private list of reclaimed pointers + that could not be freed during the current scan, where they remain until the next scan. + + This algorithm allocates memory for internal HP array. + + This function is called internally by ThreadGC object when upper bound of thread's list of reclaimed pointers + is reached. + */ + void classic_scan( details::HPRec * pRec ); + + /// In-place scan algorithm + /** @anchor hzp_gc_inplace_scan + Unlike the \ref hzp_gc_classic_scan "classic_scan" algorithm, \p inplace_scan does not allocate any memory. + All operations are performed in-place. + */ + void inplace_scan( details::HPRec * pRec ); + }; + + /// Thread's hazard pointer manager + /** + To use Hazard Pointer reclamation schema each thread object must be linked with the object of ThreadGC class + that interacts with GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()" + on the start of each thread that uses HP GC. Before terminating the thread linked to HP GC it is necessary to call + \ref cds_threading "cds::threading::Manager::detachThread()". 
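+
+        A condensed sketch of how a container implementation typically uses the per-thread GC object
+        (illustrative only; \p gc, \p pNode and \p node_disposer are hypothetical names):
+        \code
+        details::HPGuard& hp = gc.allocGuard(); // reserve a hazard pointer slot
+        hp = pNode;                             // protect pNode against reclamation
+        // (a real container re-checks the source pointer after publishing, see Guard::protect)
+        // ... read the fields of pNode safely ...
+        gc.freeGuard( hp );                     // release the slot
+
+        // after pNode has been unlinked from the container, defer its deletion:
+        gc.retirePtr( pNode, node_disposer );   // reclaimed later by Scan / HelpScan
+        \endcode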
+ */ + class ThreadGC: cds::details::noncopyable + { + GarbageCollector& m_HzpManager ; ///< Hazard Pointer GC singleton + details::HPRec * m_pHzpRec ; ///< Pointer to thread's HZP record + + public: + ThreadGC() + : m_HzpManager( GarbageCollector::instance() ), + m_pHzpRec( NULL ) + {} + ~ThreadGC() + { + fini(); + } + + /// Checks if thread GC is initialized + bool isInitialized() const { return m_pHzpRec != NULL ; } + + /// Initialization. Repeat call is available + void init() + { + if ( !m_pHzpRec ) + m_pHzpRec = m_HzpManager.AllocateHPRec(); + } + + /// Finalization. Repeat call is available + void fini() + { + if ( m_pHzpRec ) { + details::HPRec * pRec = m_pHzpRec; + m_pHzpRec = NULL; + m_HzpManager.RetireHPRec( pRec ); + } + } + + /// Initializes HP guard \p guard + details::HPGuard& allocGuard() + { + assert( m_pHzpRec != NULL ); + return m_pHzpRec->m_hzp.alloc(); + } + + /// Frees HP guard \p guard + void freeGuard( details::HPGuard& guard ) + { + assert( m_pHzpRec != NULL ); + m_pHzpRec->m_hzp.free( guard ); + } + + /// Initializes HP guard array \p arr + template + void allocGuard( details::HPArray& arr ) + { + assert( m_pHzpRec != NULL ); + m_pHzpRec->m_hzp.alloc( arr ); + } + + /// Frees HP guard array \p arr + template + void freeGuard( details::HPArray& arr ) + { + assert( m_pHzpRec != NULL ); + m_pHzpRec->m_hzp.free( arr ); + } + + /// Places retired pointer \p and its deleter \p pFunc into thread's array of retired pointer for deferred reclamation + template + void retirePtr( T * p, void (* pFunc)(T *) ) + { + retirePtr( details::retired_ptr( reinterpret_cast( p ), reinterpret_cast( pFunc ) ) ); + } + + /// Places retired pointer \p into thread's array of retired pointer for deferred reclamation + void retirePtr( const details::retired_ptr& p ) + { + m_pHzpRec->m_arrRetired.push( p ); + + if ( m_pHzpRec->m_arrRetired.isFull() ) { + // Max of retired pointer count is reached. Do scan + scan(); + } + } + + //@cond + void scan() + { + m_HzpManager.Scan( m_pHzpRec ); + m_HzpManager.HelpScan( m_pHzpRec ); + } + //@endcond + }; + + /// Auto HPGuard. + /** + This class encapsulates Hazard Pointer guard to protect a pointer against deletion . + It allocates one HP from thread's HP array in constructor and free the HP allocated in destruction time. + */ + class AutoHPGuard + { + //@cond + details::HPGuard& m_hp ; ///< Hazard pointer guarded + ThreadGC& m_gc ; ///< Thread GC + //@endcond + + public: + typedef details::HPGuard::hazard_ptr hazard_ptr ; ///< Hazard pointer type + public: + /// Allocates HP guard from \p gc + AutoHPGuard( ThreadGC& gc ) + : m_hp( gc.allocGuard() ) + , m_gc( gc ) + {} + + /// Allocates HP guard from \p gc and protects the pointer \p p of type \p T + template + AutoHPGuard( ThreadGC& gc, T * p ) + : m_hp( gc.allocGuard() ) + , m_gc( gc ) + { + m_hp = p; + } + + /// Frees HP guard. The pointer guarded may be deleted after this. + ~AutoHPGuard() + { + m_gc.freeGuard( m_hp ); + } + + /// Returns thread GC + ThreadGC& getGC() const + { + return m_gc; + } + + /// Protects the pointer \p p against reclamation (guards the pointer). + template + T * operator =( T * p ) + { + return m_hp = p; + } + + //@cond + hazard_ptr get() const + { + return m_hp; + } + //@endcond + }; + + /// Auto-managed array of hazard pointers + /** + This class is wrapper around cds::gc::hzp::details::HPArray class. 
+ \p Count is the size of HP array + */ + template + class AutoHPArray: public details::HPArray + { + ThreadGC& m_mgr ; ///< Thread GC + + public: + /// Rebind array for other size \p COUNT2 + template + struct rebind { + typedef AutoHPArray other ; ///< rebinding result + }; + + public: + /// Allocates array of HP guard from \p mgr + AutoHPArray( ThreadGC& mgr ) + : m_mgr( mgr ) + { + mgr.allocGuard( *this ); + } + + /// Frees array of HP guard + ~AutoHPArray() + { + m_mgr.freeGuard( *this ); + } + + /// Returns thread GC + ThreadGC& getGC() const { return m_mgr; } + }; + + } // namespace hzp +}} // namespace cds::gc + +// Inlines +#include + +#if CDS_COMPILER == CDS_COMPILER_MSVC +# pragma warning(pop) +#endif + +#endif // #ifndef __CDS_GC_HZP_HZP_H diff --git a/cds/gc/nogc.h b/cds/gc/nogc.h new file mode 100644 index 00000000..38560f1c --- /dev/null +++ b/cds/gc/nogc.h @@ -0,0 +1,29 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_NOGC_H +#define __CDS_GC_NOGC_H + +namespace cds { namespace gc { + + /// No garbage collecting + /** @ingroup cds_garbage_collector + This empty class is used in \p libcds to mark that a template specialization implements + the container without any garbage collector schema. + + Usually, the container with this "GC" does not support the item removal. + */ + class nogc + { + public: + //@cond + /// Faked scan + static void scan() + {} + static void force_dispose() + {} + //@endcond + }; + +}} // namespace cds::gc + +#endif // #define __CDS_GC_NOGC_H diff --git a/cds/gc/ptb.h b/cds/gc/ptb.h new file mode 100644 index 00000000..8096695d --- /dev/null +++ b/cds/gc/ptb.h @@ -0,0 +1,10 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_PTB_H +#define __CDS_GC_PTB_H + +#include +#include +#include + +#endif // #ifndef __CDS_GC_PTB_H diff --git a/cds/gc/ptb/ptb.h b/cds/gc/ptb/ptb.h new file mode 100644 index 00000000..a3e01684 --- /dev/null +++ b/cds/gc/ptb/ptb.h @@ -0,0 +1,1027 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_PTB_PASS_THE_BUCK_H +#define __CDS_GC_PTB_PASS_THE_BUCK_H + +#include +#include +#include +#include +#include + +#include + +#if CDS_COMPILER == CDS_COMPILER_MSVC +# pragma warning(push) +# pragma warning(disable:4251) // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2' +#endif + +namespace cds { namespace gc { + + /// Pass The Buck reclamation schema + /** + \par Sources: + - [2002] M. Herlihy, V. Luchangco, and M. Moir. The repeat offender problem: A mechanism for supporting + dynamic-sized lockfree data structures. Technical Report TR-2002-112, Sun Microsystems Laboratories, 2002 + - [2002] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Dynamic-sized Lockfree Data Structures. + Technical Report TR-2002-110, Sun Microsystems Laboratories, 2002 + - [2005] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Nonblocking Memory Management Support + for Dynamic-Sized Data Structures. ACM Transactions on Computer Systems, Vol.23, No.2, May 2005 + + + The cds::gc::ptb namespace and its members are internal representation of the Pass-the-Buck GC and should not be used directly. + Use cds::gc::PTB class in your code. + + Pass-the-Buck (PTB) garbage collector is a singleton. The main user-level part of PTB schema is + GC class and its nested classes. Before use any PTB-related class you must initialize PTB garbage collector + by contructing cds::gc::PTB object in beginning of your main(). + See cds::gc::PTB class for explanation. 
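// --- Editor's sketch (not part of the original libcds patch) ---------------
// Minimal program skeleton for the initialization order implied above:
// cds::Initialize(), a cds::gc::PTB object that lives for the whole run, and
// per-thread attachment. The threading-manager include and the containers
// themselves are omitted; header paths follow the files added by this patch.
#include <cds/init.h>
#include <cds/gc/ptb.h>

int main()
{
    cds::Initialize();
    {
        cds::gc::PTB ptbGC;                         // constructs the PTB singleton
        cds::threading::Manager::attachThread();    // attach main() to the GC

        // ... create and use PTB-based containers here ...

        cds::threading::Manager::detachThread();
    }                                               // ~PTB() destroys the singleton
    cds::Terminate();
    return 0;
}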
+ + \par Implementation issues + The global list of free guards (cds::gc::ptb::details::guard_allocator) is protected by spin-lock (i.e. serialized). + It seems that solution should not introduce significant performance bottleneck, because each thread has own set + of guards allocated from global list of free guards and access to global list is occurred only when + all thread's guard is busy. In this case the thread allocates next block of guards from global list. + Guards allocated for the thread is push back to the global list only when the thread terminates. + */ + namespace ptb { + + // Forward declarations + class Guard; + template class GuardArray; + class ThreadGC; + class GarbageCollector; + + /// Retired pointer type + typedef cds::gc::details::retired_ptr retired_ptr; + + using cds::gc::details::free_retired_ptr_func; + + /// Details of Pass the Buck algorithm + namespace details { + + // Forward declaration + class liberate_set; + + /// Retired pointer buffer node + struct retired_ptr_node { + retired_ptr m_ptr ; ///< retired pointer + retired_ptr_node * m_pNext ; ///< next retired pointer in buffer + retired_ptr_node * m_pNextFree ; ///< next item in free list of retired_ptr_node + }; + + /// Internal guard representation + struct guard_data { + typedef retired_ptr_node * handoff_ptr ; ///< trapped value type + typedef void * guarded_ptr ; ///< type of value guarded + + CDS_ATOMIC::atomic pPost ; ///< pointer guarded + +#if 0 + typedef cds::SpinLock handoff_spin ; ///< type of spin-lock for accessing to \p pHandOff field + handoff_spin spinHandOff ; ///< access to \p pHandOff field + handoff_ptr pHandOff ; ///< trapped pointer +#endif + + CDS_ATOMIC::atomic pGlobalNext ; ///< next item of global list of allocated guards + CDS_ATOMIC::atomic pNextFree ; ///< pointer to the next item in global or thread-local free-list + + guard_data * pThreadNext ; ///< next item of thread's local list of guards + + //@cond + guard_data() + : pPost( null_ptr()) +#if 0 + , pHandOff( null_ptr() ) +#endif + , pGlobalNext( null_ptr() ) + , pNextFree( null_ptr() ) + , pThreadNext( null_ptr() ) + {} + + void init() + { + pPost.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + } + //@endcond + + /// Checks if the guard is free, that is, it does not contain any pointer guarded + bool isFree() const + { + return pPost.load( CDS_ATOMIC::memory_order_acquire ) == null_ptr(); + } + }; + + /// Guard allocator + template + class guard_allocator + { + cds::details::Allocator m_GuardAllocator ; ///< guard allocator + + CDS_ATOMIC::atomic m_GuardList ; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field) + CDS_ATOMIC::atomic m_FreeGuardList ; ///< Head of free guard list (linked by guard_data::pNextFree field) + SpinLock m_freeListLock ; ///< Access to m_FreeGuardList + + /* + Unfortunately, access to the list of free guard is lock-based. + Lock-free manipulations with guard free-list are ABA-prone. + TODO: working with m_FreeGuardList in lock-free manner. + */ + + private: + /// Allocates new guard from the heap. 
The function uses aligned allocator + guard_data * allocNew() + { + //TODO: the allocator should make block allocation + + details::guard_data * pGuard = m_GuardAllocator.New(); + + // Link guard to the list + // m_GuardList is accumulated list and it cannot support concurrent deletion, + // so, ABA problem is impossible for it + details::guard_data * pHead = m_GuardList.load( CDS_ATOMIC::memory_order_acquire ); + do { + pGuard->pGlobalNext.store( pHead, CDS_ATOMIC::memory_order_relaxed ); + // pHead is changed by compare_exchange_weak + } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + + pGuard->init(); + return pGuard; + } + + public: + // Default ctor + guard_allocator() + : m_GuardList( null_ptr() ) + , m_FreeGuardList( null_ptr() ) + {} + + // Destructor + ~guard_allocator() + { + guard_data * pNext; + for ( guard_data * pData = m_GuardList.load( CDS_ATOMIC::memory_order_relaxed ); pData != null_ptr(); pData = pNext ) { + pNext = pData->pGlobalNext.load( CDS_ATOMIC::memory_order_relaxed ); + m_GuardAllocator.Delete( pData ); + } + } + + /// Allocates a guard from free list or from heap if free list is empty + guard_data * alloc() + { + // Try to pop a guard from free-list + details::guard_data * pGuard; + + { + cds::lock::scoped_lock al( m_freeListLock ); + pGuard = m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed); + if ( pGuard ) + m_FreeGuardList.store( pGuard->pNextFree.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed ); + } + if ( !pGuard ) + return allocNew(); + + pGuard->init(); + return pGuard; + } + + /// Frees guard \p pGuard + /** + The function places the guard \p pGuard into free-list + */ + void free( guard_data * pGuard ) + { + pGuard->pPost.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + + cds::lock::scoped_lock al( m_freeListLock ); + pGuard->pNextFree.store( m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed ); + m_FreeGuardList.store( pGuard, CDS_ATOMIC::memory_order_relaxed ); + } + + /// Allocates list of guard + /** + The list returned is linked by guard's \p pThreadNext and \p pNextFree fields. + + cds::gc::ptb::ThreadGC supporting method + */ + guard_data * allocList( size_t nCount ) + { + assert( nCount != 0 ); + + guard_data * pHead; + guard_data * pLast; + + pHead = + pLast = alloc(); + + // The guard list allocated is private for the thread, + // so, we can use relaxed memory order + while ( --nCount ) { + guard_data * p = alloc(); + pLast->pNextFree.store( pLast->pThreadNext = p, CDS_ATOMIC::memory_order_relaxed ); + pLast = p; + } + + pLast->pNextFree.store( pLast->pThreadNext = null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + + return pHead; + } + + /// Frees list of guards + /** + The list \p pList is linked by guard's \p pThreadNext field. 
+ + cds::gc::ptb::ThreadGC supporting method + */ + void freeList( guard_data * pList ) + { + assert( pList != null_ptr() ); + + guard_data * pLast = pList; + while ( pLast->pThreadNext ) { + pLast->pPost.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + guard_data * p; + pLast->pNextFree.store( p = pLast->pThreadNext, CDS_ATOMIC::memory_order_relaxed ); + pLast = p; + } + + cds::lock::scoped_lock al( m_freeListLock ); + pLast->pNextFree.store( m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed ); + m_FreeGuardList.store( pList, CDS_ATOMIC::memory_order_relaxed ); + } + + /// Returns the list's head of guards allocated + guard_data * begin() + { + return m_GuardList.load(CDS_ATOMIC::memory_order_acquire); + } + }; + + /// Retired pointer buffer + /** + The buffer of retired nodes ready for liberating. + When size of buffer exceeds a threshold the GC calls \p liberate procedure to free + retired nodes. + */ + class retired_ptr_buffer + { + CDS_ATOMIC::atomic m_pHead ; ///< head of buffer + CDS_ATOMIC::atomic m_nItemCount; ///< buffer's item count + + public: + //@cond + retired_ptr_buffer() + : m_pHead( null_ptr() ) + , m_nItemCount(0) + {} + + ~retired_ptr_buffer() + { + assert( m_pHead.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr()); + } + //@endcond + + /// Pushes new node into the buffer. Returns current buffer size + size_t push( retired_ptr_node& node ) + { + retired_ptr_node * pHead = m_pHead.load(CDS_ATOMIC::memory_order_acquire); + do { + node.m_pNext = pHead; + // pHead is changed by compare_exchange_weak + } while ( !m_pHead.compare_exchange_weak( pHead, &node, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + + return m_nItemCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ) + 1; + } + + /// Result of \ref ptb_gc_privatve "privatize" function. + /** + The \p privatize function returns retired node list as \p first and the size of that list as \p second. + */ + typedef std::pair privatize_result; + + /// Gets current list of retired pointer and clears the list + /**@anchor ptb_gc_privatve + */ + privatize_result privatize() + { + privatize_result res; + res.first = m_pHead.exchange( null_ptr(), CDS_ATOMIC::memory_order_acq_rel ); + + // Item counter is needed only as a threshold for liberate function + // So, we may clear the item counter without synchronization with m_pHead + res.second = m_nItemCount.exchange( 0, CDS_ATOMIC::memory_order_relaxed ); + return res; + } + + /// Returns current size of buffer (approximate) + size_t size() const + { + return m_nItemCount.load(CDS_ATOMIC::memory_order_relaxed); + } + }; + + /// Pool of retired pointers + /** + The class acts as an allocator of retired node. + Retired pointers are linked in the lock-free list. 
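// --- Editor's sketch (not part of the original libcds patch) ---------------
// The retired_ptr_buffer above is a multi-producer lock-free LIFO: producers
// push with a CAS loop, and liberate() takes the whole list in one exchange()
// ("privatize"). Modelled here with std::atomic and a hypothetical node type.
#include <atomic>
#include <cstddef>
#include <utility>

struct buf_node { buf_node* next; /* retired pointer payload */ };

struct lifo_buffer {
    std::atomic<buf_node*> head;
    std::atomic<size_t>    count;

    lifo_buffer() : head( nullptr ), count( 0 ) {}

    size_t push( buf_node& n )               // lock-free push, returns new (approximate) size
    {
        buf_node* h = head.load( std::memory_order_acquire );
        do {
            n.next = h;                      // h is refreshed by compare_exchange_weak on failure
        } while ( !head.compare_exchange_weak( h, &n,
                      std::memory_order_release, std::memory_order_relaxed ));
        return count.fetch_add( 1, std::memory_order_relaxed ) + 1;
    }

    std::pair<buf_node*, size_t> privatize()  // detach the whole list for processing
    {
        buf_node* list = head.exchange( nullptr, std::memory_order_acq_rel );
        size_t    n    = count.exchange( 0, std::memory_order_relaxed );
        return std::make_pair( list, n );
    }
};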
+ */ + template + class retired_ptr_pool { + /// Pool item + typedef retired_ptr_node item; + + /// Count of items in block + static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1; + + /// Pool block + struct block { + block * pNext ; ///< next block + item items[m_nItemPerBlock] ; ///< item array + }; + + CDS_ATOMIC::atomic m_pBlockListHead ; ///< head of of allocated block list + + // To solve ABA problem we use epoch-based approach + static const unsigned int c_nEpochCount = 4 ; ///< Max epoch count + CDS_ATOMIC::atomic m_nCurEpoch ; ///< Current epoch + CDS_ATOMIC::atomic m_pEpochFree[c_nEpochCount] ; ///< List of free item per epoch + CDS_ATOMIC::atomic m_pGlobalFreeHead ; ///< Head of unallocated item list + + cds::details::Allocator< block, Alloc > m_BlockAllocator ; ///< block allocator + + private: + //@cond + void allocNewBlock() + { + // allocate new block + block * pNew = m_BlockAllocator.New(); + + // link items within the block + item * pLastItem = pNew->items + m_nItemPerBlock - 1; + for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) { + pItem->m_pNextFree = pItem + 1; + CDS_STRICT_DO( pItem->m_pNext = null_ptr() ); + } + + // link new block to block list + { + block * pHead = m_pBlockListHead.load(CDS_ATOMIC::memory_order_acquire); + do { + pNew->pNext = pHead; + // pHead is changed by compare_exchange_weak + } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } + + // link block's items to free list + { + item * pHead = m_pGlobalFreeHead.load(CDS_ATOMIC::memory_order_acquire); + do { + pLastItem->m_pNextFree = pHead; + // pHead is changed by compare_exchange_weak + } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } + } + + unsigned int current_epoch() const + { + return m_nCurEpoch.load(CDS_ATOMIC::memory_order_acquire) & (c_nEpochCount - 1); + } + unsigned int next_epoch() const + { + return (m_nCurEpoch.load(CDS_ATOMIC::memory_order_acquire) - 1) & (c_nEpochCount - 1); + } + //@endcond + + public: + //@cond + retired_ptr_pool() + : m_pBlockListHead(null_ptr()) + , m_nCurEpoch(0) + , m_pGlobalFreeHead( null_ptr()) + { + for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i ) + m_pEpochFree[i].store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + + allocNewBlock(); + } + + ~retired_ptr_pool() + { + block * p; + for ( block * pBlock = m_pBlockListHead.load(CDS_ATOMIC::memory_order_relaxed); pBlock; pBlock = p ) { + p = pBlock->pNext; + m_BlockAllocator.Delete( pBlock ); + } + } + + /// Increments current epoch + void inc_epoch() + { + m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_acq_rel ); + } + + //@endcond + + /// Allocates new retired pointer + retired_ptr_node& alloc() + { + unsigned int nEpoch; + item * pItem; + for (;;) { + pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(CDS_ATOMIC::memory_order_acquire); + if ( !pItem ) + goto retry; + if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + goto success; + } + + /* + item * pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(CDS_ATOMIC::memory_order_acquire); + while ( pItem ) { + if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + goto success; + } + */ + + // Epoch free list is empty + // 
Alloc from global free list + retry: + pItem = m_pGlobalFreeHead.load( CDS_ATOMIC::memory_order_acquire ); + do { + if ( !pItem ) { + allocNewBlock(); + goto retry; + } + // pItem is changed by compare_exchange_weak + } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + + success: + CDS_STRICT_DO( pItem->m_pNextFree = null_ptr() ); + return *pItem; + } + + /// Allocates and initializes new retired pointer + retired_ptr_node& alloc( const retired_ptr& p ) + { + retired_ptr_node& node = alloc(); + node.m_ptr = p; + return node; + } + + /// Places the list (pHead, pTail) of retired pointers to pool (frees retired pointers) + /** + The list is linked on the m_pNextFree field + */ + void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) + { + assert( pHead != null_ptr() ); + assert( pTail != null_ptr() ); + + unsigned int nEpoch; + item * pCurHead; + do { + pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(CDS_ATOMIC::memory_order_acquire); + pTail->m_pNextFree = pCurHead; + } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } + }; + + /// Uninitialized guard + class guard: public cds::details::noncopyable + { + friend class ThreadGC; + protected: + details::guard_data * m_pGuard ; ///< Pointer to guard data + public: + /// Initialize empty guard. + guard() + : m_pGuard(null_ptr()) + {} + + /// Object destructor, does nothing + ~guard() + {} + + /// Guards pointer \p p + void set( void * p ) + { + assert( m_pGuard != null_ptr() ); + m_pGuard->pPost.store( p, CDS_ATOMIC::memory_order_release ); + //CDS_COMPILER_RW_BARRIER; + } + + /// Clears the guard + void clear() + { + assert( m_pGuard != null_ptr() ); + m_pGuard->pPost.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER ); + } + + /// Guards pointer \p p + template + T * operator =( T * p ) + { + set( reinterpret_cast( const_cast(p) )); + return p; + } + + public: // for ThreadGC. + /* + GCC cannot compile code for template versions of ThreasGC::allocGuard/freeGuard, + the compiler produces error: ‘cds::gc::ptb::details::guard_data* cds::gc::ptb::details::guard::m_pGuard’ is protected + despite the fact that ThreadGC is declared as friend for guard class. + We should not like to declare m_pGuard member as public one. + Therefore, we have to add set_guard/get_guard public functions + */ + /// Set guard data + void set_guard( details::guard_data * pGuard ) + { + assert( m_pGuard == null_ptr() ); + m_pGuard = pGuard; + } + + /// Get current guard data + details::guard_data * get_guard() + { + return m_pGuard; + } + /// Get current guard data + details::guard_data * get_guard() const + { + return m_pGuard; + } + }; + + } // namespace details + + /// Guard + /** + This class represents auto guard: ctor allocates a guard from guard pool, + dtor returns the guard back to the pool of free guard. + */ + class Guard: public details::guard + { + //@cond + typedef details::guard base_class; + friend class ThreadGC; + //@endcond + + ThreadGC& m_gc ; ///< ThreadGC object of current thread + public: + /// Allocates a guard from \p gc GC. 
\p gc must be ThreadGC object of current thread + Guard(ThreadGC& gc); + + /// Returns guard allocated back to pool of free guards + ~Guard(); // inline after GarbageCollector + + /// Returns PTB GC object + ThreadGC& getGC() + { + return m_gc; + } + + /// Guards pointer \p p + template + T * operator =( T * p ) + { + return base_class::operator =( p ); + } + }; + + /// Array of guards + /** + This class represents array of auto guards: ctor allocates \p Count guards from guard pool, + dtor returns the guards allocated back to the pool. + */ + template + class GuardArray: public cds::details::noncopyable + { + details::guard m_arr[Count] ; ///< array of guard + ThreadGC& m_gc ; ///< ThreadGC object of current thread + const static size_t c_nCapacity = Count ; ///< Array capacity (equal to \p Count template parameter) + + public: + /// Rebind array for other size \p OtherCount + template + struct rebind { + typedef GuardArray other ; ///< rebinding result + }; + + public: + /// Allocates array of guards from \p gc which must be the ThreadGC object of current thread + GuardArray( ThreadGC& gc ) ; // inline below + + /// Returns guards allocated back to pool + ~GuardArray() ; // inline below + + /// Returns the capacity of array + CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT + { + return c_nCapacity; + } + + /// Returns PTB ThreadGC object + ThreadGC& getGC() CDS_NOEXCEPT + { + return m_gc; + } + + /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) + details::guard& operator []( size_t nIndex ) + { + assert( nIndex < capacity() ); + return m_arr[nIndex]; + } + + /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version] + const details::guard& operator []( size_t nIndex ) const + { + assert( nIndex < capacity() ); + return m_arr[nIndex]; + } + + /// Set the guard \p nIndex. 
0 <= \p nIndex < \p Count + template + void set( size_t nIndex, T * p ) + { + assert( nIndex < capacity() ); + m_arr[nIndex].set( p ); + } + + /// Clears (sets to NULL) the guard \p nIndex + void clear( size_t nIndex ) + { + assert( nIndex < capacity() ); + m_arr[nIndex].clear(); + } + + /// Clears all guards in the array + void clearAll() + { + for ( size_t i = 0; i < capacity(); ++i ) + clear(i); + } + }; + + /// Memory manager (Garbage collector) + class CDS_EXPORT_API GarbageCollector + { + private: + //@cond + friend class ThreadGC; + + /// Internal GC statistics + struct internal_stat + { + CDS_ATOMIC::atomic m_nGuardCount ; ///< Total guard count + CDS_ATOMIC::atomic m_nFreeGuardCount ; ///< Count of free guard + + internal_stat() + : m_nGuardCount(0) + , m_nFreeGuardCount(0) + {} + }; + //@endcond + + public: + /// Exception "No GarbageCollector object is created" + CDS_DECLARE_EXCEPTION( PTBManagerEmpty, "Global PTB GarbageCollector is NULL" ); + + /// Internal GC statistics + struct InternalState + { + size_t m_nGuardCount ; ///< Total guard count + size_t m_nFreeGuardCount ; ///< Count of free guard + + //@cond + InternalState() + : m_nGuardCount(0) + , m_nFreeGuardCount(0) + {} + + InternalState& operator =( internal_stat const& s ) + { + m_nGuardCount = s.m_nGuardCount.load(CDS_ATOMIC::memory_order_relaxed); + m_nFreeGuardCount = s.m_nFreeGuardCount.load(CDS_ATOMIC::memory_order_relaxed); + + return *this; + } + //@endcond + }; + + private: + static GarbageCollector * m_pManager ; ///< GC global instance + + details::guard_allocator<> m_GuardPool ; ///< Guard pool + details::retired_ptr_pool<> m_RetiredAllocator ; ///< Pool of free retired pointers + details::retired_ptr_buffer m_RetiredBuffer ; ///< Retired pointer buffer for liberating + //CDS_ATOMIC::atomic m_nInLiberate ; ///< number of parallel \p liberate fnction call + + CDS_ATOMIC::atomic m_nLiberateThreshold; ///< Max size of retired pointer buffer to call liberate + const size_t m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC + + internal_stat m_stat ; ///< Internal statistics + bool m_bStatEnabled ; ///< Internal Statistics enabled + + public: + /// Initializes PTB memory manager singleton + /** + This member function creates and initializes PTB global object. + The function should be called before using CDS data structure based on cds::gc::PTB GC. Usually, + this member function is called in the \p main() function. See cds::gc::ptb for example. + After calling of this function you may use CDS data structures based on cds::gc::PTB. + + \par Parameters + \li \p nLiberateThreshold - the liberate threshold. When count of retired pointers reaches this value, + the \ref ptb_gc_liberate "liberate" member function would be called for freeing retired pointers. + If \p nLiberateThreshold <= 1, \p liberate would called after each \ref ptb_gc_retirePtr "retirePtr" call. + \li \p nInitialThreadGuardCount - initial count of guard allocated for ThreadGC. When a thread + is initialized the GC allocates local guard pool for the thread from common guard pool. + By perforce the local thread's guard pool is grown automatically from common pool. + When the thread terminated its guard pool is backed to common GC's pool. + + */ + static void CDS_STDCALL Construct( + size_t nLiberateThreshold = 1024 + , size_t nInitialThreadGuardCount = 8 + ); + + /// Destroys PTB memory manager + /** + The member function destroys PTB global object. 
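// --- Editor's sketch (not part of the original libcds patch) ---------------
// The singleton lifetime documented above, called directly. Application code
// normally uses the cds::gc::PTB wrapper instead; this only makes the
// Construct/Destruct pairing and its two parameters explicit.
void ptb_singleton_lifetime_example()
{
    // run liberate() after 256 retired pointers; pre-allocate 16 guards per thread
    cds::gc::ptb::GarbageCollector::Construct( 256, 16 );

    // ... attach threads and use PTB-based containers ...

    cds::gc::ptb::GarbageCollector::Destruct();
}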
After calling of this function you may \b NOT + use CDS data structures based on cds::gc::PTB. Usually, the \p Destruct function is called + at the end of your \p main(). See cds::gc::ptb for example. + */ + static void CDS_STDCALL Destruct(); + + /// Returns pointer to GarbageCollector instance + /** + If PTB GC is not initialized, \p PTBManagerEmpty exception is thrown + */ + static GarbageCollector& instance() + { + if ( m_pManager == null_ptr() ) + throw PTBManagerEmpty(); + return *m_pManager; + } + + /// Checks if global GC object is constructed and may be used + static bool isUsed() CDS_NOEXCEPT + { + return m_pManager != null_ptr(); + } + + public: + //@{ + /// Internal interface + + /// Allocates a guard + details::guard_data * allocGuard() + { + return m_GuardPool.alloc(); + } + + /// Frees guard \p g for reusing in future + void freeGuard(details::guard_data * pGuard ) + { + m_GuardPool.free( pGuard ); + } + + /// Allocates guard list for a thread. + details::guard_data * allocGuardList( size_t nCount ) + { + return m_GuardPool.allocList( nCount ); + } + + /// Frees thread's guard list pointed by \p pList + void freeGuardList( details::guard_data * pList ) + { + m_GuardPool.freeList( pList ); + } + + /// Places retired pointer \p and its deleter \p pFunc into thread's array of retired pointer for deferred reclamation + /**@anchor ptb_gc_retirePtr + */ + template + void retirePtr( T * p, void (* pFunc)(T *) ) + { + retirePtr( retired_ptr( reinterpret_cast( p ), reinterpret_cast( pFunc ) ) ); + } + + /// Places retired pointer \p into thread's array of retired pointer for deferred reclamation + void retirePtr( retired_ptr const& p ) + { + if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(CDS_ATOMIC::memory_order_relaxed) ) + liberate(); + } + + protected: + /// Liberate function + /** @anchor ptb_gc_liberate + The main function of Pass The Buck algorithm. It tries to free retired pointers if they are not + trapped by any guard. + */ + void liberate(); + + //@} + + private: + //@cond +#if 0 + void liberate( details::liberate_set& set ); +#endif + //@endcond + + public: + /// Get internal statistics + InternalState& getInternalState(InternalState& stat) const + { + return stat = m_stat; + } + + /// Checks if internal statistics enabled + bool isStatisticsEnabled() const + { + return m_bStatEnabled; + } + + /// Enables/disables internal statistics + bool enableStatistics( bool bEnable ) + { + bool bEnabled = m_bStatEnabled; + m_bStatEnabled = bEnable; + return bEnabled; + } + + private: + //@cond none + GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount ); + ~GarbageCollector(); + //@endcond + }; + + /// Thread GC + /** + To use Pass The Buck reclamation schema each thread object must be linked with the object of ThreadGC class + that interacts with GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()" + on the start of each thread that uses PTB GC. Before terminating the thread linked to PTB GC it is necessary to call + \ref cds_threading "cds::threading::Manager::detachThread()". + + The ThreadGC object maintains two list: + \li Thread guard list: the list of thread-local guards (linked by \p pThreadNext field) + \li Free guard list: the list of thread-local free guards (linked by \p pNextFree field) + Free guard list is a subset of thread guard list. 
+ */ + class ThreadGC: public cds::details::noncopyable + { + GarbageCollector& m_gc ; ///< reference to GC singleton + details::guard_data * m_pList ; ///< Local list of guards owned by the thread + details::guard_data * m_pFree ; ///< The list of free guard from m_pList + + public: + ThreadGC() + : m_gc( GarbageCollector::instance() ) + , m_pList( null_ptr() ) + , m_pFree( null_ptr() ) + {} + + /// Dtor calls fini() + ~ThreadGC() + { + fini(); + } + + /// Initialization. Repeat call is available + void init() + { + if ( !m_pList ) { + m_pList = + m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount ); + } + } + + /// Finalization. Repeat call is available + void fini() + { + if ( m_pList ) { + m_gc.freeGuardList( m_pList ); + m_pList = + m_pFree = null_ptr(); + } + } + + public: + /// Initializes guard \p g + void allocGuard( Guard& g ) + { + assert( m_pList != null_ptr() ); + if ( m_pFree ) { + g.m_pGuard = m_pFree; + m_pFree = m_pFree->pNextFree.load(CDS_ATOMIC::memory_order_relaxed); + } + else { + g.m_pGuard = m_gc.allocGuard(); + g.m_pGuard->pThreadNext = m_pList; + m_pList = g.m_pGuard; + } + } + + /// Frees guard \p g + void freeGuard( Guard& g ) + { + assert( m_pList != null_ptr() ); + g.m_pGuard->pPost.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + g.m_pGuard->pNextFree.store( m_pFree, CDS_ATOMIC::memory_order_relaxed ); + m_pFree = g.m_pGuard; + } + + /// Initializes guard array \p arr + template + void allocGuard( GuardArray& arr ) + { + assert( m_pList != null_ptr() ); + size_t nCount = 0; + + while ( m_pFree && nCount < Count ) { + arr[nCount].set_guard( m_pFree ); + m_pFree = m_pFree->pNextFree.load(CDS_ATOMIC::memory_order_relaxed); + ++nCount; + } + + while ( nCount < Count ) { + details::guard& g = arr[nCount++]; + g.set_guard( m_gc.allocGuard() ); + g.get_guard()->pThreadNext = m_pList; + m_pList = g.get_guard(); + } + } + + /// Frees guard array \p arr + template + void freeGuard( GuardArray& arr ) + { + assert( m_pList != null_ptr() ); + + details::guard_data * pGuard; + for ( size_t i = 0; i < Count - 1; ++i ) { + pGuard = arr[i].get_guard(); + pGuard->pPost.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + pGuard->pNextFree.store( arr[i+1].get_guard(), CDS_ATOMIC::memory_order_relaxed ); + } + pGuard = arr[Count-1].get_guard(); + pGuard->pPost.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + pGuard->pNextFree.store( m_pFree, CDS_ATOMIC::memory_order_relaxed ); + m_pFree = arr[0].get_guard(); + } + + /// Places retired pointer \p and its deleter \p pFunc into list of retired pointer for deferred reclamation + template + void retirePtr( T * p, void (* pFunc)(T *) ) + { + m_gc.retirePtr( p, pFunc ); + } + + //@cond + void scan() + { + m_gc.liberate(); + } + //@endcond + + }; + + ////////////////////////////////////////////////////////// + // Inlines + + inline Guard::Guard(ThreadGC& gc) + : m_gc( gc ) + { + getGC().allocGuard( *this ); + } + inline Guard::~Guard() + { + getGC().freeGuard( *this ); + } + + template + inline GuardArray::GuardArray( ThreadGC& gc ) + : m_gc( gc ) + { + getGC().allocGuard( *this ); + } + template + inline GuardArray::~GuardArray() + { + getGC().freeGuard( *this ); + } + + } // namespace ptb +}} // namespace cds::gc + +#if CDS_COMPILER == CDS_COMPILER_MSVC +# pragma warning(pop) +#endif + + +#endif // #ifndef __CDS_GC_PTB_PASS_THE_BUCK_H diff --git a/cds/gc/ptb_decl.h b/cds/gc/ptb_decl.h new file mode 100644 index 00000000..790d5858 --- /dev/null +++ b/cds/gc/ptb_decl.h @@ -0,0 +1,478 @@ +//$$CDS-header$$ + 
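// --- Editor's sketch (not part of the original libcds patch) ---------------
// The two-level guard management implemented by ptb::ThreadGC above: the fast
// path pops from a thread-private free list with no synchronization, and only
// an empty list falls back to the global (lock-protected) pool. Names here are
// hypothetical placeholders.
struct guard_node { guard_node* next_free; };

struct thread_guard_pool {
    guard_node* free_list;                       // thread-private, no atomics needed

    thread_guard_pool() : free_list( 0 ) {}

    guard_node* alloc( guard_node* (*alloc_from_global)())
    {
        if ( free_list ) {                       // fast path: pop the local list
            guard_node* g = free_list;
            free_list = g->next_free;
            return g;
        }
        return alloc_from_global();              // slow path: serialized global pool
    }

    void free( guard_node* g )                   // always returns to the local list
    {
        g->next_free = free_list;
        free_list = g;
    }
};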
+#ifndef __CDS_GC_PTB_DECL_H +#define __CDS_GC_PTB_DECL_H + +#include +#include +#include + +namespace cds { namespace gc { + + /// Pass-the-Buck garbage collector + /** @ingroup cds_garbage_collector + @headerfile cds/gc/ptb.h + This class is a wrapper for Pass-the-Buck garbage collector internal implementation. + + Sources: + - [2002] M. Herlihy, V. Luchangco, and M. Moir. The repeat offender problem: A mechanism for supporting + dynamic-sized lockfree data structures. Technical Report TR-2002-112, Sun Microsystems Laboratories, 2002 + - [2002] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Dynamic-sized Lockfree Data Structures. + Technical Report TR-2002-110, Sun Microsystems Laboratories, 2002 + - [2005] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Nonblocking Memory Management Support + for Dynamic_Sized Data Structures. ACM Transactions on Computer Systems, Vol.23, No.2, May 2005 + + See \ref cds_how_to_use "How to use" section for details of garbage collector applying. + */ + class PTB + { + public: + /// Native guarded pointer type + typedef void * guarded_pointer; + +#ifdef CDS_CXX11_TEMPLATE_ALIAS_SUPPORT + /// Atomic reference + /** + @headerfile cds/gc/ptb.h + */ + template using atomic_ref = CDS_ATOMIC::atomic; + + /// Atomic type + /** + @headerfile cds/gc/ptb.h + */ + template using atomic_type = CDS_ATOMIC::atomic; + + /// Atomic marked pointer + /** + @headerfile cds/gc/ptb.h + */ + template using atomic_marked_ptr = CDS_ATOMIC::atomic; +#else + template + class atomic_ref: public CDS_ATOMIC::atomic + { + typedef CDS_ATOMIC::atomic base_class; + public: +# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT + atomic_ref() = default; +# else + atomic_ref() + : base_class() + {} +# endif + explicit CDS_CONSTEXPR atomic_ref(T * p) CDS_NOEXCEPT + : base_class( p ) + {} + }; + + template + class atomic_type: public CDS_ATOMIC::atomic + { + typedef CDS_ATOMIC::atomic base_class; + public: +# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT + atomic_type() = default; +# else + atomic_type() CDS_NOEXCEPT + : base_class() + {} +# endif + explicit CDS_CONSTEXPR atomic_type(T const & v) CDS_NOEXCEPT + : base_class( v ) + {} + }; + + template + class atomic_marked_ptr: public CDS_ATOMIC::atomic + { + typedef CDS_ATOMIC::atomic base_class; + public: +# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT + atomic_marked_ptr() = default; +# else + atomic_marked_ptr() + : base_class() + {} +# endif + explicit CDS_CONSTEXPR atomic_marked_ptr(MarkedPtr val) CDS_NOEXCEPT + : base_class( val ) + {} + explicit CDS_CONSTEXPR atomic_marked_ptr(typename MarkedPtr::value_type * p) CDS_NOEXCEPT + : base_class( p ) + {} + }; +#endif + + /// Thread GC implementation for internal usage + typedef ptb::ThreadGC thread_gc_impl; + + /// Wrapper for ptb::ThreadGC class + /** + @headerfile cds/gc/ptb.h + This class performs automatically attaching/detaching Pass-the-Buck GC + for the current thread. + */ + class thread_gc: public thread_gc_impl + { + //@cond + bool m_bPersistent; + //@endcond + public: + /// Constructor + /** + The constructor attaches the current thread to the Pass-the-Buck GC + if it is not yet attached. + The \p bPersistent parameter specifies attachment persistence: + - \p true - the class destructor will not detach the thread from Pass-the-Buck GC. + - \p false (default) - the class destructor will detach the thread from Pass-the-Buck GC. 
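// --- Editor's sketch (not part of the original libcds patch) ---------------
// RAII thread attachment via the thread_gc wrapper described above: the
// constructor attaches the calling thread to PTB if needed, the destructor
// detaches it (default, non-persistent mode). The thread body is hypothetical.
void worker_thread_body()
{
    cds::gc::PTB::thread_gc attach;      // bPersistent = false

    // ... this thread may now use PTB-based containers ...

}                                        // detached from PTB here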
+ */ + thread_gc( + bool bPersistent = false + ) ; // inline in ptb_impl.h + + /// Destructor + /** + If the object has been created in persistent mode, the destructor does nothing. + Otherwise it detaches the current thread from Pass-the-Buck GC. + */ + ~thread_gc() ; // inline in ptb_impl.h + }; + + /// Base for container node + /** + @headerfile cds/gc/ptb.h + This struct is empty for Pass-the-Buck GC + */ + struct container_node + {}; + + + /// Pass-the-Buck guard + /** + @headerfile cds/gc/ptb.h + This class is a wrapper for ptb::Guard. + */ + class Guard: public ptb::Guard + { + //@cond + typedef ptb::Guard base_class; + //@endcond + + public: + //@cond + Guard() ; // inline in ptb_impl.h + //@endcond + + /// Protects a pointer of type atomic + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store it + to the HP slot repeatedly until the guard's value equals \p toGuard + */ + template + T protect( CDS_ATOMIC::atomic const& toGuard ) + { + T pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed); + T pRet; + do { + pRet = assign( pCur ); + pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire); + } while ( pRet != pCur ); + return pCur; + } + + /// Protects a converted pointer of type atomic + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store result of \p f functor + to the HP slot repeatedly until the guard's value equals \p toGuard. + + The function is useful for intrusive containers when \p toGuard is a node pointer + that should be converted to a pointer to the value type before guarding. + The parameter \p f of type Func is a functor that makes this conversion: + \code + struct functor { + value_type * operator()( T * p ); + }; + \endcode + Really, the result of f( toGuard.load() ) is assigned to the hazard pointer. + */ + template + T protect( CDS_ATOMIC::atomic const& toGuard, Func f ) + { + T pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed); + T pRet; + do { + pRet = pCur; + assign( f( pCur ) ); + pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire); + } while ( pRet != pCur ); + return pCur; + } + + /// Store \p p to the guard + /** + The function equals to a simple assignment, no loop is performed. + Can be used for a pointer that cannot be changed concurrently. + */ + template + T * assign( T * p ) + { + return base_class::operator =(p); + } + + /// Store marked pointer \p p to the guard + /** + The function equals to a simple assignment of p.ptr(), no loop is performed. + Can be used for a marked pointer that cannot be changed concurrently. + */ + template + T * assign( cds::details::marked_ptr p ) + { + return base_class::operator =( p.ptr() ); + } + + /// Copy from \p src guard to \p this guard + void copy( Guard const& src ) + { + assign( src.get_native() ); + } + + /// Clear value of the guard + void clear() + { + base_class::clear(); + } + + /// Get the value currently protected (relaxed read) + template + T * get() const + { + return reinterpret_cast( get_native() ); + } + + /// Get native guarded pointer stored + guarded_pointer get_native() const + { + return base_class::get_guard()->pPost.load(CDS_ATOMIC::memory_order_relaxed); + } + + }; + + /// Array of Pass-the-Buck guards + /** + @headerfile cds/gc/ptb.h + This class is a wrapper for ptb::GuardArray template. + Template parameter \p Count defines the size of PTB array. 
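// --- Editor's sketch (not part of the original libcds patch) ---------------
// Typical call site for Guard::protect() shown above. The current thread is
// assumed to be attached to PTB already; 'item' and 'head' are hypothetical.
struct item { int value; };

int read_head_value( CDS_ATOMIC::atomic<item*>& head )
{
    cds::gc::PTB::Guard g;          // takes a guard from the current thread's pool
    item* p = g.protect( head );    // loops until the guarded value is stable
    return p ? p->value : -1;       // p cannot be reclaimed while g protects it
}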
+ */ + template + class GuardArray: public ptb::GuardArray + { + //@cond + typedef ptb::GuardArray base_class; + //@endcond + public: + /// Rebind array for other size \p COUNT2 + template + struct rebind { + typedef GuardArray other ; ///< rebinding result + }; + + public: + //@cond + GuardArray() ; // inline in ptb_impl.h + //@endcond + + /// Protects a pointer of type \p atomic + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store it + to the slot \p nIndex repeatedly until the guard's value equals \p toGuard + */ + template + T protect(size_t nIndex, CDS_ATOMIC::atomic const& toGuard ) + { + T pRet; + do { + pRet = assign( nIndex, toGuard.load(CDS_ATOMIC::memory_order_relaxed) ); + } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_acquire)); + + return pRet; + } + + /// Protects a pointer of type \p atomic + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store it + to the slot \p nIndex repeatedly until the guard's value equals \p toGuard + + The function is useful for intrusive containers when \p toGuard is a node pointer + that should be converted to a pointer to the value type before guarding. + The parameter \p f of type Func is a functor that makes this conversion: + \code + struct functor { + value_type * operator()( T * p ); + }; + \endcode + Really, the result of f( toGuard.load() ) is assigned to the hazard pointer. + */ + template + T protect(size_t nIndex, CDS_ATOMIC::atomic const& toGuard, Func f ) + { + T pRet; + do { + assign( nIndex, f( pRet = toGuard.load(CDS_ATOMIC::memory_order_relaxed) )); + } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_acquire)); + + return pRet; + } + + /// Store \p to the slot \p nIndex + /** + The function equals to a simple assignment, no loop is performed. + */ + template + T * assign( size_t nIndex, T * p ) + { + base_class::set(nIndex, p); + return p; + } + + /// Store marked pointer \p p to the guard + /** + The function equals to a simple assignment of p.ptr(), no loop is performed. + Can be used for a marked pointer that cannot be changed concurrently. + */ + template + T * assign( size_t nIndex, cds::details::marked_ptr p ) + { + return assign( nIndex, p.ptr() ); + } + + /// Copy guarded value from \p src guard to slot at index \p nIndex + void copy( size_t nIndex, Guard const& src ) + { + assign( nIndex, src.get_native() ); + } + + /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex + void copy( size_t nDestIndex, size_t nSrcIndex ) + { + assign( nDestIndex, get_native( nSrcIndex )); + } + + /// Clear value of the slot \p nIndex + void clear( size_t nIndex) + { + base_class::clear( nIndex ); + } + + /// Get current value of slot \p nIndex + template + T * get( size_t nIndex) const + { + return reinterpret_cast( get_native( nIndex ) ); + } + + /// Get native guarded pointer stored + guarded_pointer get_native( size_t nIndex ) const + { + return base_class::operator[](nIndex).get_guard()->pPost.load(CDS_ATOMIC::memory_order_relaxed); + } + + /// Capacity of the guard array + static CDS_CONSTEXPR size_t capacity() + { + return Count; + } + }; + + public: + /// Initializes ptb::GarbageCollector singleton + /** + The constructor calls GarbageCollector::Construct with passed parameters. + See ptb::GarbageCollector::Construct for explanation of parameters meaning. 
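// --- Editor's sketch (not part of the original libcds patch) ---------------
// GuardArray is convenient when an operation needs several hazard pointers at
// once; both pointers below stay protected until the array goes out of scope.
// The thread is assumed to be attached to PTB; 'cell' is a hypothetical type.
struct cell { int value; };

void read_two( CDS_ATOMIC::atomic<cell*>& a, CDS_ATOMIC::atomic<cell*>& b )
{
    cds::gc::PTB::GuardArray<2> guards;      // two slots from the thread's pool
    cell* pa = guards.protect( 0, a );
    cell* pb = guards.protect( 1, b );

    // ... pa and pb may be used safely here ...

    guards.clearAll();                       // optional: drop protection early
}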
+ */ + PTB( + size_t nLiberateThreshold = 1024 + , size_t nInitialThreadGuardCount = 8 + ) + { + ptb::GarbageCollector::Construct( + nLiberateThreshold, + nInitialThreadGuardCount + ); + } + + /// Terminates ptb::GarbageCollector singleton + /** + The destructor calls \code ptb::GarbageCollector::Destruct() \endcode + */ + ~PTB() + { + ptb::GarbageCollector::Destruct(); + } + + /// Checks if count of hazard pointer is no less than \p nCountNeeded + /** + The function always returns \p true since the guard count is unlimited for + PTB garbage collector. + */ + static bool check_available_guards( size_t nCountNeeded, bool /*bRaiseException*/ = true ) + { + CDS_UNUSED( nCountNeeded ); + return true; + } + + /// Retire pointer \p p with function \p pFunc + /** + The function places pointer \p p to array of pointers ready for removing. + (so called retired pointer array). The pointer can be safely removed when no guarded pointer points to it. + Deleting the pointer is the function \p pFunc call. + */ + template + static void retire( T * p, void (* pFunc)(T *) ) + { + ptb::GarbageCollector::instance().retirePtr( p, pFunc ); + } + + /// Retire pointer \p p with functor of type \p Disposer + /** + The function places pointer \p p to array of pointers ready for removing. + (so called retired pointer array). The pointer can be safely removed when no guarded pointer points to it. + + See gc::HP::retire for \p Disposer requirements. + */ + template + static void retire( T * p ) + { + retire( p, cds::details::static_functor::call ); + } + + /// Checks if Pass-the-Buck GC is constructed and may be used + static bool isUsed() + { + return ptb::GarbageCollector::isUsed(); + } + + /// Forced GC cycle call for current thread + /** + Usually, this function should not be called directly. + */ + static void scan() ; // inline in ptb_impl.h + + /// Synonym for \ref scan() + static void force_dispose() + { + scan(); + } + }; + +}} // namespace cds::gc + +#endif // #ifndef __CDS_GC_PTB_DECL_H diff --git a/cds/gc/ptb_impl.h b/cds/gc/ptb_impl.h new file mode 100644 index 00000000..201564cf --- /dev/null +++ b/cds/gc/ptb_impl.h @@ -0,0 +1,43 @@ +//$$CDS-header$$ + +#ifndef __CDS_GC_PTB_IMPL_H +#define __CDS_GC_PTB_IMPL_H + +#include + +//@cond +namespace cds { namespace gc { + + inline PTB::thread_gc::thread_gc( + bool bPersistent + ) + : m_bPersistent( bPersistent ) + { + if ( !cds::threading::Manager::isThreadAttached() ) + cds::threading::Manager::attachThread(); + } + + inline PTB::thread_gc::~thread_gc() + { + if ( !m_bPersistent ) + cds::threading::Manager::detachThread(); + } + + inline PTB::Guard::Guard() + : Guard::base_class( cds::threading::getGC() ) + {} + + template + inline PTB::GuardArray::GuardArray() + : GuardArray::base_class( cds::threading::getGC() ) + {} + + inline void PTB::scan() + { + cds::threading::getGC().scan(); + } + +}} // namespace cds::gc +//@endcond + +#endif // #ifndef __CDS_GC_PTB_IMPL_H diff --git a/cds/init.h b/cds/init.h new file mode 100644 index 00000000..60085809 --- /dev/null +++ b/cds/init.h @@ -0,0 +1,89 @@ +//$$CDS-header$$ + +#ifndef __CDS_INIT_H +#define __CDS_INIT_H + +#include +#include +#include +#include + +namespace cds { + + //@cond + namespace details { + bool CDS_EXPORT_API init_first_call(); + bool CDS_EXPORT_API fini_last_call(); + } // namespace details + //@endcond + + /// Initialize CDS library + /** + The function initializes \p CDS library framework. 
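// --- Editor's sketch (not part of the original libcds patch) ---------------
// Retiring a node removed from a container with PTB::retire shown above: the
// node is not freed immediately, only after no guard refers to it any more.
// 'my_node' and the disposer are hypothetical.
struct my_node { int key; };

struct node_disposer {
    void operator()( my_node* p ) const { delete p; }
};

void unlink_and_retire( my_node* p )
{
    // ... p has already been unlinked from the container ...
    cds::gc::PTB::retire<node_disposer>( p );   // deferred deletion via liberate()
}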
+ Before usage of \p CDS library features your application must initialize it + by calling \p Initialize function: + @code + #include + #include + + int main() + { + // // Initialize CDS library + cds::Initialize( 0 ); + + { + // // Initialize Hazard Pointer GC (if it is needed for you) + cds::gc::HP(); + + // // Now you can use CDS library containers with Hazard Pointer GC + ... + + } + // // Teminate CDS library + cds::Terminate(); + + return 0; + } + @endcode + + You may call \p Initialize several times, only first call is significant others will be ignored. + To terminate the \p CDS library correctly, each call to \p Initialize must be balanced by a corresponding call to \ref Terminate. + + Note, that this function does not initialize garbage collectors. To use GC you need you should call + GC-specific constructor function to initialize internal structures of GC. See cds::gc and its subnamespace for details. + */ + static inline void Initialize( + unsigned int nFeatureFlags = 0 ///< for future use, must be zero. + ) + { + CDS_UNUSED( nFeatureFlags ); + + if ( cds::details::init_first_call() ) + { + cds::OS::topology::init(); + cds::threading::ThreadData::s_nProcCount = cds::OS::topology::processor_count(); + if ( cds::threading::ThreadData::s_nProcCount == 0 ) + cds::threading::ThreadData::s_nProcCount = 1; + + cds::threading::Manager::init(); + } + } + + /// Terminate CDS library + /** + This function terminates \p CDS library. + After \p Terminate calling many features of the library are unavailable. + This call should be the last call of \p CDS library in your application. + */ + static inline void Terminate() + { + if ( cds::details::fini_last_call() ) { + cds::threading::Manager::fini(); + + cds::OS::topology::fini(); + } + } + +} // namespace cds + +#endif // __CDS_INIT_H diff --git a/cds/int_algo.h b/cds/int_algo.h new file mode 100644 index 00000000..5e20a7b0 --- /dev/null +++ b/cds/int_algo.h @@ -0,0 +1,74 @@ +//$$CDS-header$$ + +#ifndef __CDS_INT_ALGO_H +#define __CDS_INT_ALGO_H + +#include + +namespace cds { namespace beans { + + /// Returns largest previous integer for log2( n ) + static inline size_t log2floor( size_t n ) + { + return n ? cds::bitop::MSBnz( n ) : 0; + } + + /// Returns smallest following integer for log2( n ) + static inline size_t log2ceil( size_t n ) + { + size_t i = log2floor( n ); + return size_t( 1 << i ) < n ? i + 1 : i; + } + + /// Returns largest previous power of 2 for \p n + /** + Examples: + \code + floor2(0) == 1 // !!! + floor2(1) == 1 + floor2(2) == 2 + floor2(3) == 2 + floor2(4) == 4 + floor2(15) == 8 + floor2(16) == 16 + floor2(17) == 16 + \endcode + */ + static inline size_t floor2( size_t n ) + { + return size_t(1) << log2floor( n ); + } + + /// Returns smallest following power of 2 for \p n + /** + Examples: + \code + ceil2(0) == 1 // !!! + ceil2(1) == 1 + ceil2(2) == 2 + ceil2(3) == 4 + ceil2(4) == 4 + ceil2(15) == 16 + ceil2(16) == 16 + ceil2(17) == 32 + \endcode + */ + static inline size_t ceil2( size_t n ) + { + return size_t(1) << log2ceil( n ); + } + + /// Checks if \p n is power of 2 + CDS_CONSTEXPR static inline bool is_power2( size_t n ) CDS_NOEXCEPT + { + return (n & (n - 1)) == 0 && n; + } + + /// Returns binary logarithm of \p n if \p n is power of two, otherwise returns 0 + static inline size_t log2( size_t n ) + { + return is_power2(n) ? 
log2floor(n) : 0; + } +}} // namespace cds::beans + +#endif // #ifndef __CDS_INT_ALGO_H diff --git a/cds/intrusive/base.h b/cds/intrusive/base.h new file mode 100644 index 00000000..1247f4b8 --- /dev/null +++ b/cds/intrusive/base.h @@ -0,0 +1,160 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_BASE_H +#define __CDS_INTRUSIVE_BASE_H + +#include +#include +#include + +namespace cds { + +/// Intrusive containers +/** + @ingroup cds_intrusive_containers + The namespace cds::intrusive contains intrusive lock-free containers. + The idea comes from \p boost::intrusive library, see http://boost.org/doc/ as a good introduction to intrusive approach. + The intrusive containers of libcds library is developed as close to boost::intrusive + + In terms of lock-free approach, the main advantage of intrusive containers is + that no memory allocation is performed to maintain container items. + However, additional requirements is imposed for types and values that can be stored in intrusive container. + See the container documentation for details. + + Restriction for Gidenstam's garbage collector cds::gc::HRC: + the Gidenstam's garbage collector makes additional requirements to type of item in intrusive container. + Therefore, for this GC only \p base_hook is allowed as the value of opt::hook option. + + \anchor cds_intrusive_item_destroying + \par Destroying items + + It should be very careful when destroying an item removed from intrusive container. + In other threads the references to popped item may exists some time after removing. + To destroy the removed item in thread-safe manner you should call static function \p retire + of garbage collector you use, for example: + \code + struct destroyer { + void operator ()( my_type * p ) + { + delete p; + } + }; + + typedef cds::intrusive::TreiberStack< cds::gc::HP, my_type, cds::opt::disposer< destroyer > > stack; + stack s; + + // .... + + my_type * p = s.pop(); + + if ( p ) { + // It is wrong + // delete p; + + // It is correct + cds::gc:HP::retire< destroyer >( p ); + } + \endcode + The situation becomes even more complicated when you want store items in different intrusive containers. + In this case the best way is using reference counting: + \code + struct my_type { + ... + std::atomic nRefCount; + + my_type() + : nRefCount(0) + {} + }; + + struct destroyer { + void operator ()( my_type * p ) + { + if ( --p->nRefCount == 0 ) + delete p ; // delete only after no reference pointing to p + } + }; + + typedef cds::intrusive::TreiberStack< cds::gc::HP, my_type, cds::opt::disposer< destroyer > > stack; + typedef cds::intrusive::MSQueue< cds::gc::HP, my_type, cds::opt::disposer< destroyer > > queue; + stack s; + queue q; + + my_type * v = new my_type(); + + v.nRefCount++ ; // increment counter before pushing the item to the stack + s.push(v); + + v.nRefCount++ ; // increment counter before pushing the item to the queue + q.push(v); + + // .... + + my_type * ps = s.pop(); + if ( ps ) { + // It is wrong + // delete ps; + + // It is correct + cds::gc:HP::retire< destroyer >( ps ); + } + + my_type * pq = q.pop(); + if ( pq ) { + // It is wrong + // delete pq; + + // It is correct + cds::gc:HP::retire< destroyer >( pq ); + } + \endcode + Violation of these rules may lead to a crash. + + \par Intrusive containers and Hazard Pointer-like garbage collectors + + If you develop your intrusive container based on libcds library framework, you should + take in the account the following. 
+ The main idea of garbage collectors (GC) based on Hazard Pointer schema is protecting a shared pointer + by publishing it as a "hazard" one i.e. as a pointer that is changing at the current time and cannot be + deleted at this moment. In intrusive container paradigm, the pointer to the node of the container + and the pointer to the item stored in the container are not equal in the general case. + However, any pointer to the node should be castable to the appropriate pointer to the container's item. + In general, any item can be placed to some different intrusive containers simultaneously, + and each of those container holds a unique pointer to its node that refers to the same item. + When we protect a pointer, we want to protect an item pointer that is the invariant + for any container stored that item. In your intrusive container, instead of protecting by GC's guard a pointer to an node + you should convert it to the pointer to the item and then protect resulting item pointer. + Otherwise an unpredictable result may occur. + +*/ +namespace intrusive { + + /// @defgroup cds_intrusive_containers Intrusive containers + /** @defgroup cds_intrusive_helper Helper structs for intrusive containers + @ingroup cds_intrusive_containers + */ + /** @defgroup cds_intrusive_stack Stack + @ingroup cds_intrusive_containers + */ + /** @defgroup cds_intrusive_queue Queue + @ingroup cds_intrusive_containers + */ + /** @defgroup cds_intrusive_priority_queue Priority queue + @ingroup cds_intrusive_containers + */ + /** @defgroup cds_intrusive_deque Deque + @ingroup cds_intrusive_containers + */ + /** @defgroup cds_intrusive_map Set + @ingroup cds_intrusive_containers + */ + /** @defgroup cds_intrusive_tree Tree + @ingroup cds_intrusive_containers + */ + /** @defgroup cds_intrusive_list List + @ingroup cds_intrusive_containers + */ + +}} // namespace cds::intrusuve + +#endif // #ifndef __CDS_INTRUSIVE_BASE_H diff --git a/cds/intrusive/basket_queue.h b/cds/intrusive/basket_queue.h new file mode 100644 index 00000000..2d4f204d --- /dev/null +++ b/cds/intrusive/basket_queue.h @@ -0,0 +1,813 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_BASKET_QUEUE_H +#define __CDS_INTRUSIVE_BASKET_QUEUE_H + +#include +#include +#include +#include +#include +#include + +#include + +namespace cds { namespace intrusive { + + /// BasketQueue -related definitions + /** @ingroup cds_intrusive_helper + */ + namespace basket_queue { + /// BasketQueue node + /** + Template parameters: + - GC - garbage collector used + - Tag - a tag used to distinguish between different implementation + */ + template + struct node: public GC::container_node + { + typedef GC gc ; ///< Garbage collector + typedef Tag tag ; ///< tag + + typedef cds::details::marked_ptr marked_ptr ; ///< marked pointer + typedef typename gc::template atomic_marked_ptr< marked_ptr> atomic_marked_ptr ; ///< atomic marked pointer specific for GC + + /// Rebind node for other template parameters + template + struct rebind { + typedef node other ; ///< Rebinding result + }; + + atomic_marked_ptr m_pNext ; ///< pointer to the next node in the container + + node() + : m_pNext( null_ptr() ) + {} + }; + + //@cond + // Specialization for HRC GC + template + struct node< gc::HRC, Tag>: public gc::HRC::container_node + { + typedef gc::HRC gc ; ///< Garbage collector + typedef Tag tag ; ///< tag + + typedef cds::details::marked_ptr marked_ptr ; ///< marked pointer + typedef typename gc::template atomic_marked_ptr< marked_ptr> atomic_marked_ptr ; ///< atomic marked pointer specific for 
GC + + atomic_marked_ptr m_pNext ; ///< pointer to the next node in the container + + node() + : m_pNext(null_ptr()) + {} + + protected: + virtual void cleanUp( cds::gc::hrc::ThreadGC * pGC ) + { + assert( pGC != null_ptr() ); + typename gc::template GuardArray<2> aGuards( *pGC ); + + while ( true ) { + marked_ptr pNext = aGuards.protect( 0, m_pNext ); + if ( pNext.ptr() && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) { + marked_ptr p = aGuards.protect( 1, pNext->m_pNext ); + m_pNext.compare_exchange_strong( pNext, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ); + continue; + } + else { + break; + } + } + } + + virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent ) + { + if ( bConcurrent ) { + marked_ptr pNext = m_pNext.load(CDS_ATOMIC::memory_order_relaxed); + do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ); + } + else { + m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed ); + } + } + }; + //@endcond + + using single_link::default_hook; + + //@cond + template < typename HookType, CDS_DECL_OPTIONS2> + struct hook + { + typedef typename opt::make_options< default_hook, CDS_OPTIONS2>::type options; + typedef typename options::gc gc; + typedef typename options::tag tag; + typedef node node_type; + typedef HookType hook_type; + }; + //@endcond + + + /// Base hook + /** + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - tag + */ + template < CDS_DECL_OPTIONS2 > + struct base_hook: public hook< opt::base_hook_tag, CDS_OPTIONS2 > + {}; + + /// Member hook + /** + \p MemberOffset defines offset in bytes of \ref node member into your structure. + Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - tag + */ + template < size_t MemberOffset, CDS_DECL_OPTIONS2 > + struct member_hook: public hook< opt::member_hook_tag, CDS_OPTIONS2 > + { + //@cond + static const size_t c_nMemberOffset = MemberOffset; + //@endcond + }; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - tag + */ + template + struct traits_hook: public hook< opt::traits_hook_tag, CDS_OPTIONS2 > + { + //@cond + typedef NodeTraits node_traits; + //@endcond + }; + + +#if defined(CDS_CXX11_TEMPLATE_ALIAS_SUPPORT) && !defined(CDS_DOXYGEN_INVOKED) + template < typename Node, opt::link_check_type LinkType > using get_link_checker = single_link::get_link_checker< Node, LinkType >; +#else + /// Metafunction for selecting appropriate link checking policy + template < typename Node, opt::link_check_type LinkType > + struct get_link_checker: public single_link::get_link_checker< Node, LinkType > + {}; + +#endif + + /// Basket queue internal statistics. May be used for debugging or profiling + /** + Basket queue statistics derives from cds::intrusive::queue_stat + and extends it by two additional fields specific for the algorithm. 
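// --- Editor's sketch (not part of the original libcds patch) ---------------
// The two usual ways to attach basket_queue's node to a user type, following
// the hook descriptions above: inherit the node (base_hook), or embed it as a
// member and give its offset to member_hook via offsetof. The option spelling
// cds::opt::gc<...> and the value types are assumed for illustration.
#include <cstddef>   // offsetof

namespace bq = cds::intrusive::basket_queue;

// base_hook: the queue node is a base class of the item
struct value_base: public bq::node< cds::gc::HP >
{
    int data;
};

// member_hook: the queue node is an ordinary member, located via offsetof
struct value_member
{
    int data;
    bq::node< cds::gc::HP > queue_node;
};
typedef bq::member_hook< offsetof( value_member, queue_node ),
                         cds::opt::gc< cds::gc::HP >
> member_hook_t;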
+ */ + template + struct stat: public cds::intrusive::queue_stat< Counter > + { + //@cond + typedef cds::intrusive::queue_stat< Counter > base_class; + typedef typename base_class::counter_type counter_type; + //@endcond + + counter_type m_TryAddBasket ; ///< Count of attemps adding new item to a basket (only or BasketQueue, for other queue this metric is not used) + counter_type m_AddBasketCount ; ///< Count of events "Enqueue a new item into basket" (only or BasketQueue, for other queue this metric is not used) + + /// Register an attempt t add new item to basket + void onTryAddBasket() { ++m_TryAddBasket; } + /// Register event "Enqueue a new item into basket" (only or BasketQueue, for other queue this metric is not used) + void onAddBasket() { ++m_AddBasketCount; } + + //@cond + void reset() + { + base_class::reset(); + m_TryAddBasket.reset(); + m_AddBasketCount.reset(); + } + + stat& operator +=( stat const& s ) + { + base_class::operator +=( s ); + m_TryAddBasket += s.m_TryAddBasket.get(); + m_AddBasketCount += s.m_AddBasketCount.get(); + return *this; + } + //@endcond + }; + + /// Dummy basket queue statistics - no counting is performed. Support interface like \ref stat + struct dummy_stat: public cds::intrusive::queue_dummy_stat + { + //@cond + void onTryAddBasket() {} + void onAddBasket() {} + + void reset() {} + dummy_stat& operator +=( dummy_stat const& ) + { + return *this; + } + //@endcond + }; + + } // namespace basket_queue + + /// Basket lock-free queue (intrusive variant) + /** @ingroup cds_intrusive_queue + Implementation of basket queue algorithm. + + \par Source: + [2007] Moshe Hoffman, Ori Shalev, Nir Shavit "The Baskets Queue" + + Key idea + + In the “basket” approach, instead of + the traditional ordered list of nodes, the queue consists of an ordered list of groups + of nodes (logical baskets). The order of nodes in each basket need not be specified, and in + fact, it is easiest to maintain them in FIFO order. The baskets fulfill the following basic + rules: + - Each basket has a time interval in which all its nodes’ enqueue operations overlap. + - The baskets are ordered by the order of their respective time intervals. + - For each basket, its nodes’ dequeue operations occur after its time interval. + - The dequeue operations are performed according to the order of baskets. + + Two properties define the FIFO order of nodes: + - The order of nodes in a basket is not specified. + - The order of nodes in different baskets is the FIFO-order of their respective baskets. + + In algorithms such as the MS-queue or optimistic + queue, threads enqueue items by applying a Compare-and-swap (CAS) operation to the + queue’s tail pointer, and all the threads that fail on a particular CAS operation (and also + the winner of that CAS) overlap in time. In particular, they share the time interval of + the CAS operation itself. Hence, all the threads that fail to CAS on the tail-node of + the queue may be inserted into the same basket. By integrating the basket-mechanism + as the back-off mechanism, the time usually spent on backing-off before trying to link + onto the new tail, can now be utilized to insert the failed operations into the basket, + allowing enqueues to complete sooner. In the meantime, the next successful CAS operations + by enqueues allow new baskets to be formed down the list, and these can be + filled concurrently. Moreover, the failed operations don’t retry their link attempt on the + new tail, lowering the overall contention on it. 
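[Editorial aside] As a reading aid only, the mechanism just described can be sketched with plain std::atomic pointers (no marked pointers, no hazard pointers, no back-off; all names hypothetical). The single point being illustrated is that threads losing the tail CAS link into the basket at the old tail instead of chasing the new tail:
\code
#include <atomic>

struct Node { std::atomic<Node*> m_pNext { nullptr }; };

std::atomic<Node*> m_pTail;   // assumed to point at (or near) the last node

// Simplified model of the basket idea; NOT the library implementation.
bool basket_enqueue( Node* pNew )
{
    for (;;) {
        Node* t     = m_pTail.load();
        Node* tNext = t->m_pNext.load();

        if ( tNext == nullptr ) {
            pNew->m_pNext.store( nullptr );
            if ( t->m_pNext.compare_exchange_weak( tNext, pNew )) {
                // winner: a new basket is opened at pNew, try to advance the tail
                m_pTail.compare_exchange_strong( t, pNew );
                return true;
            }
            // lost the CAS: the winner has already linked its node after t.
            // Do not retry at the new tail - join the winner's basket instead:
            Node* pBasket = t->m_pNext.load();
            pNew->m_pNext.store( pBasket );
            if ( t->m_pNext.compare_exchange_weak( pBasket, pNew ))
                return true;
            // basket insertion failed as well - loop and start over
        }
        else {
            // the tail is lagging behind - help to advance it, then retry
            m_pTail.compare_exchange_weak( t, tNext );
        }
    }
}
\endcode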
This leads to a queue + algorithm that unlike all former concurrent queue algorithms requires virtually no tuning + of the backoff mechanisms to reduce contention, making the algorithm an attractive + out-of-the-box queue. + + In order to enqueue, just as in MSQueue, a thread first tries to link the new node to + the last node. If it failed to do so, then another thread has already succeeded. Thus it + tries to insert the new node into the new basket that was created by the winner thread. + To dequeue a node, a thread first reads the head of the queue to obtain the + oldest basket. It may then dequeue any node in the oldest basket. + + Template arguments: + - \p GC - garbage collector type: gc::HP, gc::HRC, gc::PTB + - \p T - type to be stored in the queue, should be convertible to \ref single_link::node + - \p Options - options + + Type of node: \ref single_link::node + + \p Options are: + - opt::hook - hook used. Possible values are: basket_queue::base_hook, basket_queue::member_hook, basket_queue::traits_hook. + If the option is not specified, basket_queue::base_hook<> is used. + For Gidenstam's gc::HRC, only basket_queue::base_hook is supported. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. + - opt::disposer - the functor used for dispose removed items. Default is opt::v::empty_disposer. This option is used + in \ref dequeue function. + - opt::link_checker - the type of node's link fields checking. Default is \ref opt::debug_check_link + Note: for gc::HRC garbage collector, link checking policy is always selected as \ref opt::always_check_link. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter (no item counting feature) + - opt::stat - the type to gather internal statistics. + Possible option value are: \ref basket_queue::stat, \ref basket_queue::dummy_stat, + user-provided class that supports basket_queue::stat interface. + Default is \ref basket_queue::dummy_stat. + Generic option intrusive::queue_stat and intrusive::queue_dummy_stat are acceptable too, however, + they will be automatically converted to basket_queue::stat and basket_queue::dummy_stat + respectively. + - opt::alignment - the alignment for internal queue data. Default is opt::cache_line_alignment + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + + Garbage collecting schema \p GC must be consistent with the basket_queue::node GC. + + \par About item disposing + Like MSQueue, the Baskets queue algo has a key feature: even if the queue is empty it contains one item that is "dummy" one from + the standpoint of the algo. See \ref dequeue function doc for explanation. + + \par Examples + \code + #include + #include + + namespace ci = cds::inrtusive; + typedef cds::gc::HP hp_gc; + + // Basket queue with Hazard Pointer garbage collector, base hook + item disposer: + struct Foo: public ci::basket_queue::node< hp_gc > + { + // Your data + ... 
+ }; + + // Disposer for Foo struct just deletes the object passed in + struct fooDisposer { + void operator()( Foo * p ) + { + delete p; + } + }; + + typedef ci::BasketQueue< hp_gc, + Foo + ,ci::opt::hook< + ci::basket_queue::base_hook< ci::opt::gc > + > + ,ci::opt::disposer< fooDisposer > + > fooQueue; + + // BasketQueue with Hazard Pointer garbage collector, + // member hook + item disposer + item counter, + // without alignment of internal queue data: + struct Bar + { + // Your data + ... + ci::basket_queue::node< hp_gc > hMember; + }; + + typedef ci::BasketQueue< hp_gc, + Foo + ,ci::opt::hook< + ci::basket_queue::member_hook< + offsetof(Bar, hMember) + ,ci::opt::gc + > + > + ,ci::opt::disposer< fooDisposer > + ,cds::opt::item_counter< cds::atomicity::item_counter > + ,cds::opt::alignment< cds::opt::no_special_alignment > + > barQueue; + \endcode + */ + template + class BasketQueue + { + //@cond + struct default_options + { + typedef cds::backoff::empty back_off; + typedef basket_queue::base_hook<> hook; + typedef opt::v::empty_disposer disposer; + typedef atomicity::empty_item_counter item_counter; + typedef basket_queue::dummy_stat stat; + typedef opt::v::relaxed_ordering memory_model; + static const opt::link_check_type link_checker = opt::debug_check_link; + enum { alignment = opt::cache_line_alignment }; + }; + //@endcond + + public: + //@cond + typedef typename opt::make_options< + typename cds::opt::find_type_traits< default_options, CDS_OPTIONS9 >::type + ,CDS_OPTIONS9 + >::type options; + + typedef typename std::conditional< + std::is_same >::value + ,basket_queue::stat<> + ,typename std::conditional< + std::is_same::value + ,basket_queue::dummy_stat + ,typename options::stat + >::type + >::type stat_type_; + + //@endcond + + public: + typedef T value_type ; ///< type of value stored in the queue + typedef typename options::hook hook ; ///< hook type + typedef typename hook::node_type node_type ; ///< node type + typedef typename options::disposer disposer ; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + typedef typename basket_queue::get_link_checker< node_type, options::link_checker >::type link_checker ; ///< link checker + + typedef GC gc ; ///< Garbage collector + typedef typename options::back_off back_off ; ///< back-off strategy + typedef typename options::item_counter item_counter ; ///< Item counting policy used +#ifdef CDS_DOXYGEN_INVOKED + typedef typename options::stat stat ; ///< Internal statistics policy used +#else + typedef stat_type_ stat; +#endif + typedef typename options::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + + /// Rebind template arguments + template + struct rebind { + typedef BasketQueue< GC2, T2, CDS_OTHER_OPTIONS9> other ; ///< Rebinding result + }; + + static const size_t m_nHazardPtrCount = 6 ; ///< Count of hazard pointer required for the algorithm + + protected: + //@cond + + struct internal_disposer + { + void operator()( value_type * p ) + { + assert( p != null_ptr()); + + BasketQueue::clear_links( node_traits::to_node_ptr(p) ); + disposer()( p ); + } + }; + + typedef typename node_type::marked_ptr marked_ptr; + typedef typename node_type::atomic_marked_ptr atomic_marked_ptr; + + typedef intrusive::node_to_value node_to_value; + typedef typename opt::details::alignment_setter< atomic_marked_ptr, options::alignment >::type aligned_node_ptr; + typedef typename opt::details::alignment_setter< + cds::intrusive::details::dummy_node< gc, node_type>, + options::alignment + >::type dummy_node_type; + + //@endcond + + aligned_node_ptr m_pHead ; ///< Queue's head pointer (aligned) + aligned_node_ptr m_pTail ; ///< Queue's tail pointer (aligned) + + dummy_node_type m_Dummy ; ///< dummy node + item_counter m_ItemCounter ; ///< Item counter + stat m_Stat ; ///< Internal statistics + //@cond + size_t const m_nMaxHops; + //@endcond + + //@cond + + template + static marked_ptr guard_node( typename gc::template GuardArray& g, size_t idx, atomic_marked_ptr const& p ) + { + marked_ptr pg; + while ( true ) { + pg = p.load( memory_model::memory_order_relaxed ); + g.assign( idx, node_traits::to_value_ptr( pg.ptr() ) ); + if ( p.load( memory_model::memory_order_acquire) == pg ) { + return pg; + } + } + } + + static marked_ptr guard_node( typename gc::Guard& g, atomic_marked_ptr const& p ) + { + marked_ptr pg; + while ( true ) { + pg = p.load( memory_model::memory_order_relaxed ); + g.assign( node_traits::to_value_ptr( pg.ptr() ) ); + if ( p.load( memory_model::memory_order_acquire) == pg ) { + return pg; + } + } + } + + struct dequeue_result { + typename gc::template GuardArray<3> guards; + node_type * pNext; + }; + + bool do_dequeue( dequeue_result& res, bool bDeque ) + { + // Note: + // If bDeque == false then the function is called from empty method and no real dequeuing operation is performed + + back_off bkoff; + + marked_ptr h; + marked_ptr t; + marked_ptr pNext; + + while ( true ) { + h = guard_node( res.guards, 0, m_pHead ); + t = guard_node( res.guards, 1, m_pTail ); + pNext = guard_node( res.guards, 2, h->m_pNext ); + + if ( h == m_pHead.load( memory_model::memory_order_acquire ) ) { + if ( h.ptr() == t.ptr() ) { + if ( !pNext.ptr() ) + return false; + + { + typename gc::Guard g; + while ( pNext->m_pNext.load(memory_model::memory_order_relaxed).ptr() && m_pTail.load(memory_model::memory_order_relaxed) == t ) { + pNext = guard_node( g, pNext->m_pNext ); + res.guards.assign( 2, g.template get() ); + } + } + + m_pTail.compare_exchange_weak( t, marked_ptr(pNext.ptr()), memory_model::memory_order_release, memory_model::memory_order_relaxed ); + } + else { + marked_ptr iter( h ); + size_t hops = 0; + + typename gc::Guard g; + + while ( pNext.ptr() && pNext.bits() && iter.ptr() != t.ptr() && m_pHead.load(memory_model::memory_order_relaxed) == h ) { + iter = pNext; + g.assign( res.guards.template get(2) ); + pNext = guard_node( res.guards, 2, pNext->m_pNext ); + ++hops; + } + + if ( m_pHead.load(memory_model::memory_order_relaxed) != h ) + continue; + + if ( iter.ptr() == t.ptr() ) + free_chain( h, iter ); + else if ( bDeque ) { + res.pNext = pNext.ptr(); + + if ( 
iter->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNext.ptr(), 1 ), memory_model::memory_order_release, memory_model::memory_order_relaxed ) ) { + if ( hops >= m_nMaxHops ) + free_chain( h, pNext ); + break; + } + } + else + return true; + } + } + + if ( bDeque ) + m_Stat.onDequeueRace(); + bkoff(); + } + + if ( bDeque ) { + --m_ItemCounter; + m_Stat.onDequeue(); + } + + return true; + } + + void free_chain( marked_ptr head, marked_ptr newHead ) + { + // "head" and "newHead" are guarded + + if ( m_pHead.compare_exchange_strong( head, marked_ptr(newHead.ptr()), memory_model::memory_order_release, memory_model::memory_order_relaxed )) + { + typename gc::template GuardArray<2> guards; + guards.assign( 0, node_traits::to_value_ptr(head.ptr()) ); + while ( head.ptr() != newHead.ptr() ) { + marked_ptr pNext = guard_node( guards, 1, head->m_pNext ); + assert( pNext.bits() != 0 ); + dispose_node( head.ptr() ); + guards.assign( 0, guards.template get(1) ); + head = pNext; + } + } + } + + static void clear_links( node_type * pNode ) + { + pNode->m_pNext.store( marked_ptr( null_ptr()), memory_model::memory_order_release ); + } + + void dispose_node( node_type * p ) + { + if ( p != m_Dummy.get() ) { + gc::template retire( node_traits::to_value_ptr(p) ); + } + else + m_Dummy.retire(); + } + //@endcond + + public: + /// Initializes empty queue + BasketQueue() + : m_pHead( null_ptr() ) + , m_pTail( null_ptr() ) + , m_nMaxHops( 3 ) + { + // GC and node_type::gc must be the same + static_assert(( std::is_same::value ), "GC and node_type::gc must be the same"); + + // For cds::gc::HRC, only one base_hook is allowed + static_assert(( + std::conditional< + std::is_same::value, + std::is_same< typename hook::hook_type, opt::base_hook_tag >, + boost::true_type + >::type::value + ), "For cds::gc::HRC, only base_hook is allowed"); + + // Head/tail initialization should be made via store call + // because of gc::HRC manages reference counting + m_pHead.store( marked_ptr(m_Dummy.get()), memory_model::memory_order_relaxed ); + m_pTail.store( marked_ptr(m_Dummy.get()), memory_model::memory_order_relaxed ); + } + + /// Destructor clears the queue + /** + Since the baskets queue contains at least one item even + if the queue is empty, the destructor may call item disposer. + */ + ~BasketQueue() + { + clear(); + + node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed).ptr(); + assert( pHead != null_ptr() ); + + { + node_type * pNext = pHead->m_pNext.load( memory_model::memory_order_relaxed ).ptr(); + while ( pNext ) { + node_type * p = pNext; + pNext = pNext->m_pNext.load( memory_model::memory_order_relaxed ).ptr(); + p->m_pNext.store( marked_ptr(), memory_model::memory_order_relaxed ); + dispose_node( p ); + } + pHead->m_pNext.store( marked_ptr(), memory_model::memory_order_relaxed ); + //m_pTail.store( marked_ptr( pHead ), memory_model::memory_order_relaxed ); + } + + m_pHead.store( marked_ptr( null_ptr()), memory_model::memory_order_relaxed ); + m_pTail.store( marked_ptr( null_ptr()), memory_model::memory_order_relaxed ); + + dispose_node( pHead ); + } + + /// Returns queue's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the queue + is empty. To check queue emptyness use \ref empty() method. 
+ */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns reference to internal statistics + const stat& statistics() const + { + return m_Stat; + } + + /// Enqueues \p val value into the queue. + /** @anchor cds_intrusive_BasketQueue_enqueue + The function always returns \p true. + */ + bool enqueue( value_type& val ) + { + node_type * pNew = node_traits::to_node_ptr( val ); + link_checker::is_empty( pNew ); + + typename gc::Guard guard; + back_off bkoff; + + marked_ptr t; + while ( true ) { + t = guard_node( guard, m_pTail ); + + marked_ptr pNext = t->m_pNext.load(memory_model::memory_order_acquire ); + + if ( pNext.ptr() == null_ptr() ) { + pNew->m_pNext.store( marked_ptr(), memory_model::memory_order_release ); + if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr(pNew), memory_model::memory_order_release, memory_model::memory_order_relaxed ) ) { + if ( !m_pTail.compare_exchange_strong( t, marked_ptr(pNew), memory_model::memory_order_release, memory_model::memory_order_relaxed )) + m_Stat.onAdvanceTailFailed(); + break; + } + + // Try adding to basket + m_Stat.onTryAddBasket(); + + // Reread tail next + typename gc::Guard gNext; + + try_again: + pNext = guard_node( gNext, t->m_pNext ); + + // add to the basket + if ( m_pTail.load(memory_model::memory_order_relaxed) == t + && t->m_pNext.load( memory_model::memory_order_relaxed) == pNext + && !pNext.bits() ) + { + bkoff(); + pNew->m_pNext.store( pNext, memory_model::memory_order_release ); + if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNew ), memory_model::memory_order_release, memory_model::memory_order_relaxed )) { + m_Stat.onAddBasket(); + break; + } + goto try_again; + } + } + else { + // Tail is misplaced, advance it + + typename gc::template GuardArray<2> g; + g.assign( 0, node_traits::to_value_ptr( pNext.ptr() ) ); + if ( t->m_pNext.load( memory_model::memory_order_relaxed ) != pNext ) { + m_Stat.onEnqueueRace(); + bkoff(); + continue; + } + + marked_ptr p; + bool bTailOk = true; + while ( ( p = pNext->m_pNext.load(memory_model::memory_order_relaxed) ).ptr() != null_ptr() ) + { + bTailOk = m_pTail.load( memory_model::memory_order_relaxed ) == t; + if ( !bTailOk ) + break; + + g.assign( 1, node_traits::to_value_ptr( p.ptr() )); + if ( pNext->m_pNext.load(memory_model::memory_order_relaxed) != p ) + continue; + pNext = p; + g.assign( 0, g.template get( 1 ) ); + } + if ( !bTailOk || !m_pTail.compare_exchange_weak( t, marked_ptr( pNext.ptr() ), memory_model::memory_order_release, memory_model::memory_order_relaxed )) + m_Stat.onAdvanceTailFailed(); + + m_Stat.onBadTail(); + } + + m_Stat.onEnqueueRace(); + } + + ++m_ItemCounter; + m_Stat.onEnqueue(); + + return true; + } + + /// Dequeues a value from the queue + /** @anchor cds_intrusive_BasketQueue_dequeue + If the queue is empty the function returns \p NULL. + + Warning: see MSQueue::deque note about item disposing + */ + value_type * dequeue() + { + dequeue_result res; + + if ( do_dequeue( res, true )) + return node_traits::to_value_ptr( *res.pNext ); + return null_ptr(); + } + + /// Synonym for \ref cds_intrusive_BasketQueue_enqueue "enqueue" function + bool push( value_type& val ) + { + return enqueue( val ); + } + + /// Synonym for \ref cds_intrusive_BasketQueue_dequeue "dequeue" function + value_type * pop() + { + return dequeue(); + } + + /// Checks if the queue is empty + /** + Note that this function is not \p const. + The function is based on \ref dequeue algorithm + but really does not dequeued any item. 
+ */ + bool empty() + { + dequeue_result res; + return !do_dequeue( res, false ); + } + + /// Clear the queue + /** + The function repeatedly calls \ref dequeue until it returns \p NULL. + The disposer defined in template \p Options is called for each item + that can be safely disposed. + */ + void clear() + { + while ( dequeue() ); + } + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_BASKET_QUEUE_H diff --git a/cds/intrusive/cuckoo_set.h b/cds/intrusive/cuckoo_set.h new file mode 100644 index 00000000..99d1c4be --- /dev/null +++ b/cds/intrusive/cuckoo_set.h @@ -0,0 +1,2878 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_CUCKOO_SET_H +#define __CDS_INTRUSIVE_CUCKOO_SET_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +//#include + +namespace cds { namespace intrusive { + + /// CuckooSet-related definitions + namespace cuckoo { + + /// Option to define probeset type + /** + The option specifies probeset type for the CuckooSet. + Available values: + - \p cds::intrusive::cuckoo::list - the probeset is a single-linked list. + The node contains pointer to next node in probeset. + - \p cds::intrusive::cuckoo::vector - the probeset is a vector + with constant-size \p Capacity where \p Capacity is an unsigned int constant. + The node does not contain any auxiliary data. + */ + template + struct probeset_type + { + //@cond + template + struct pack: public Base { + typedef Type probeset_type; + }; + //@endcond + }; + + /// Option specifying whether to store hash values in the node + /** + This option reserves additional space in the hook to store the hash value of the object once it's introduced in the container. + When this option is used, the unordered container will store the calculated hash value in the hook and rehashing operations won't need + to recalculate the hash of the value. This option will improve the performance of unordered containers + when rehashing is frequent or hashing the value is a slow operation + + The \p Count template parameter defines the size of hash array. Remember that cuckoo hashing implies at least two + hash values per item. + + Possible values of \p Count: + - 0 - no hash storing in the node + - greater that 1 - store hash values. + + Value 1 is deprecated. + */ + template + struct store_hash + { + //@cond + template + struct pack: public Base { + static unsigned int const store_hash = Count; + }; + //@endcond + }; + + + //@cond + // Probeset type placeholders + struct list_probeset_class; + struct vector_probeset_class; + //@endcond + + //@cond + // Probeset type declarations. + struct list; + template + struct vector + { + static unsigned int const c_nCapacity = Capacity; + }; + //@endcond + + /// CuckooSet node + /** + Template arguments: + - \p ProbesetType - type of probeset. Can be \p cds::intrusive::cuckoo::list + or \p cds::intrusive::cuckoo::vector. + - \p StoreHashCount - constant that defines whether to store node hash values. + See cuckoo::store_hash option for explanation + - Tag - a tag used to distinguish between different implementation when two or more + \p node is needed in single struct. 
+ */ + template + struct node +#ifdef CDS_DOXYGEN_INVOKED + { + typedef ProbesetType probeset_type ; ///< Probeset type + typedef Tag tag ; ///< Tag + static unsigned int const hash_array_size = StoreHashCount ; ///< The size of hash array + } +#endif +; + + //@cond + template + struct node< cuckoo::list, 0, Tag> + { + typedef list_probeset_class probeset_class; + typedef cuckoo::list probeset_type; + typedef Tag tag; + static unsigned int const hash_array_size = 0; + static unsigned int const probeset_size = 0; + + node * m_pNext; + + CDS_CONSTEXPR node() CDS_NOEXCEPT + : m_pNext( null_ptr() ) + {} + + void store_hash( size_t * ) + {} + + size_t * get_hash() const + { + // This node type does not store hash values!!! + assert(false); + return null_ptr(); + } + + void clear() + { + m_pNext = null_ptr(); + } + }; + + template + struct node< cuckoo::list, StoreHashCount, Tag> + { + typedef list_probeset_class probeset_class; + typedef cuckoo::list probeset_type; + typedef Tag tag; + static unsigned int const hash_array_size = StoreHashCount; + static unsigned int const probeset_size = 0; + + node * m_pNext; + size_t m_arrHash[ hash_array_size ]; + + node() CDS_NOEXCEPT + : m_pNext( null_ptr() ) + { + memset( m_arrHash, 0, sizeof(m_arrHash)); + } + + void store_hash( size_t * pHashes ) + { + memcpy( m_arrHash, pHashes, hash_array_size ); + } + + size_t * get_hash() const + { + return const_cast( m_arrHash ); + } + + void clear() + { + m_pNext = null_ptr(); + } + + }; + + template + struct node< cuckoo::vector, 0, Tag> + { + typedef vector_probeset_class probeset_class; + typedef cuckoo::vector probeset_type; + typedef Tag tag; + static unsigned int const hash_array_size = 0; + static unsigned int const probeset_size = probeset_type::c_nCapacity; + + node() CDS_NOEXCEPT + {} + + void store_hash( size_t * ) + {} + + size_t * get_hash() const + { + // This node type does not store hash values!!! + assert(false); + return null_ptr(); + } + + void clear() + {} + }; + + template + struct node< cuckoo::vector, StoreHashCount, Tag> + { + typedef vector_probeset_class probeset_class; + typedef cuckoo::vector probeset_type; + typedef Tag tag; + static unsigned int const hash_array_size = StoreHashCount; + static unsigned int const probeset_size = probeset_type::c_nCapacity; + + size_t m_arrHash[ hash_array_size ]; + + node() CDS_NOEXCEPT + { + memset( m_arrHash, 0, sizeof(m_arrHash)); + } + + void store_hash( size_t * pHashes ) + { + memcpy( m_arrHash, pHashes, hash_array_size ); + } + + size_t * get_hash() const + { + return const_cast( m_arrHash ); + } + + void clear() + {} + }; + //@endcond + + + //@cond + struct default_hook { + typedef cuckoo::list probeset_type; + static unsigned int const store_hash = 0; + typedef opt::none tag; + }; + + template < typename HookType, CDS_DECL_OPTIONS3> + struct hook + { + typedef typename opt::make_options< default_hook, CDS_OPTIONS3>::type options; + + typedef typename options::probeset_type probeset_type; + typedef typename options::tag tag; + static unsigned int const store_hash = options::store_hash; + + typedef node node_type; + + typedef HookType hook_type; + }; + //@endcond + + /// Base hook + /** + \p Options are: + - cuckoo::probeset_type - probeset type. Defaul is \p cuckoo::list + - cuckoo::store_hash - store hash values in the node or not. Default is 0 (no storing) + - opt::tag - tag to distinguish different nodes in one struct. 
Default is opt::none + */ + template < CDS_DECL_OPTIONS3 > + struct base_hook: public hook< opt::base_hook_tag, CDS_OPTIONS3 > + {}; + + /// Member hook + /** + \p MemberOffset defines offset in bytes of \ref node member into your structure. + Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - cuckoo::probeset_type - probeset type. Defaul is \p cuckoo::list + - cuckoo::store_hash - store hash values in the node or not. Default is 0 (no storing) + - opt::tag - tag to distinguish different nodes in one struct. Default is opt::none + */ + template < size_t MemberOffset, CDS_DECL_OPTIONS3 > + struct member_hook: public hook< opt::member_hook_tag, CDS_OPTIONS3 > + { + //@cond + static const size_t c_nMemberOffset = MemberOffset; + //@endcond + }; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - cuckoo::probeset_type - probeset type. Defaul is \p cuckoo::list + - cuckoo::store_hash - store hash values in the node or not. Default is 0 (no storing) + - opt::tag - tag to distinguish different nodes in one struct. Default is opt::none + */ + template + struct traits_hook: public hook< opt::traits_hook_tag, CDS_OPTIONS3 > + { + //@cond + typedef NodeTraits node_traits; + //@endcond + }; + + /// Internal statistics for \ref striping mutex policy + struct striping_stat { + typedef cds::atomicity::event_counter counter_type; ///< Counter type + + counter_type m_nCellLockCount ; ///< Count of obtaining cell lock + counter_type m_nCellTryLockCount ; ///< Count of cell \p try_lock attempts + counter_type m_nFullLockCount ; ///< Count of obtaining full lock + counter_type m_nResizeLockCount ; ///< Count of obtaining resize lock + counter_type m_nResizeCount ; ///< Count of resize event + + //@cond + void onCellLock() { ++m_nCellLockCount; } + void onCellTryLock() { ++m_nCellTryLockCount; } + void onFullLock() { ++m_nFullLockCount; } + void onResizeLock() { ++m_nResizeLockCount; } + void onResize() { ++m_nResizeCount; } + //@endcond + }; + + /// Dummy internal statistics for \ref striping mutex policy + struct empty_striping_stat { + //@cond + void onCellLock() const {} + void onCellTryLock() const {} + void onFullLock() const {} + void onResizeLock() const {} + void onResize() const {} + //@endcond + }; + + /// Lock striping concurrent access policy + /** + This is one of available opt::mutex_policy option type for CuckooSet + + Lock striping is very simple technique. + The cuckoo set consists of the bucket tables and the array of locks. + There is single lock array for each bucket table, at least, the count of bucket table is 2. + Initially, the capacity of lock array and each bucket table is the same. + When set is resized, bucket table capacity will be doubled but lock array will not. + The lock \p i protects each bucket \p j, where j = i mod L , + where \p L - the size of lock array. + + The policy contains an internal array of \p RecursiveLock locks. + + Template arguments: + - \p RecursiveLock - the type of recursive mutex. The default is \p cds_std::recursive_mutex. The mutex type should be default-constructible. + Note that a recursive spin-lock is not suitable for lock striping for performance reason. + - \p Arity - unsigned int constant that specifies an arity. The arity is the count of hash functors, i.e., the + count of lock arrays. Default value is 2. + - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR. 
+ - \p Stat - internal statistics type. Note that this template argument is automatically selected by \ref CuckooSet + class according to its \p opt::stat option. + */ + template < + class RecursiveLock = cds_std::recursive_mutex, + unsigned int Arity = 2, + class Alloc = CDS_DEFAULT_ALLOCATOR, + class Stat = empty_striping_stat + > + class striping + { + public: + typedef RecursiveLock lock_type ; ///< lock type + typedef Alloc allocator_type ; ///< allocator type + static unsigned int const c_nArity = Arity ; ///< the arity + typedef Stat statistics_type ; ///< Internal statistics type (\ref striping_stat or \ref empty_striping_stat) + + //@cond + typedef striping_stat real_stat; + typedef empty_striping_stat empty_stat; + + template + struct rebind_statistics { + typedef striping other; + }; + //@endcond + + typedef cds::lock::array< lock_type, cds::lock::pow2_select_policy, allocator_type > lock_array_type ; ///< lock array type + + protected: + //@cond + class lock_array: public lock_array_type + { + public: + // placeholder ctor + lock_array(): lock_array_type( typename lock_array_type::select_cell_policy(2) ) {} + + // real ctor + lock_array( size_t nCapacity ): lock_array_type( nCapacity, typename lock_array_type::select_cell_policy(nCapacity) ) {} + }; + + class scoped_lock: public cds::lock::scoped_lock< lock_array_type > + { + typedef cds::lock::scoped_lock< lock_array_type > base_class; + public: + scoped_lock( lock_array& arrLock, size_t nHash ): base_class( arrLock, nHash ) {} + }; + //@endcond + + protected: + //@cond + lock_array m_Locks[c_nArity] ; ///< array of lock_array_type + statistics_type m_Stat ; ///< internal statistics + //@endcond + + public: + //@cond + class scoped_cell_lock { + lock_type * m_guard[c_nArity]; + + public: + scoped_cell_lock( striping& policy, size_t const* arrHash ) + { + for ( unsigned int i = 0; i < c_nArity; ++i ) { + m_guard[i] = &( policy.m_Locks[i].at( policy.m_Locks[i].lock( arrHash[i] ))); + } + policy.m_Stat.onCellLock(); + } + + ~scoped_cell_lock() + { + for ( unsigned int i = 0; i < c_nArity; ++i ) + m_guard[i]->unlock(); + } + }; + + class scoped_cell_trylock + { + typedef typename lock_array_type::lock_type lock_type; + + lock_type * m_guard[c_nArity]; + bool m_bLocked; + + public: + scoped_cell_trylock( striping& policy, size_t const* arrHash ) + { + size_t nCell = policy.m_Locks[0].try_lock( arrHash[0] ); + m_bLocked = nCell != lock_array_type::c_nUnspecifiedCell; + if ( m_bLocked ) { + m_guard[0] = &(policy.m_Locks[0].at(nCell)); + for ( unsigned int i = 1; i < c_nArity; ++i ) { + m_guard[i] = &( policy.m_Locks[i].at( policy.m_Locks[i].lock( arrHash[i] )) ); + } + } + else { + std::fill( m_guard, m_guard + c_nArity, null_ptr() ); + } + policy.m_Stat.onCellTryLock(); + } + ~scoped_cell_trylock() + { + if ( m_bLocked ) { + for ( unsigned int i = 0; i < c_nArity; ++i ) + m_guard[i]->unlock(); + } + } + + bool locked() const + { + return m_bLocked; + } + }; + + class scoped_full_lock { + cds::lock::scoped_lock< lock_array_type > m_guard; + public: + scoped_full_lock( striping& policy ) + : m_guard( policy.m_Locks[0] ) + { + policy.m_Stat.onFullLock(); + } + + /// Ctor for scoped_resize_lock - no statistics is incremented + scoped_full_lock( striping& policy, bool ) + : m_guard( policy.m_Locks[0] ) + {} + }; + + class scoped_resize_lock: public scoped_full_lock { + public: + scoped_resize_lock( striping& policy ) + : scoped_full_lock( policy, false ) + { + policy.m_Stat.onResizeLock(); + } + }; + //@endcond + + public: + /// 
Constructor + striping( + size_t nLockCount ///< The size of lock array. Must be power of two. + ) + { + // Trick: initialize the array of locks + for ( unsigned int i = 0; i < c_nArity; ++i ) { + lock_array * pArr = m_Locks + i; + pArr->lock_array::~lock_array(); + new ( pArr ) lock_array( nLockCount ); + } + } + + /// Returns lock array size + /** + Lock array size is unchanged during \p striping object lifetime + */ + size_t lock_count() const + { + return m_Locks[0].size(); + } + + //@cond + void resize( size_t ) + { + m_Stat.onResize(); + } + //@endcond + + /// Returns the arity of striping mutex policy + CDS_CONSTEXPR unsigned int arity() const CDS_NOEXCEPT + { + return c_nArity; + } + + /// Returns internal statistics + statistics_type const& statistics() const + { + return m_Stat; + } + }; + + /// Internal statistics for \ref refinable mutex policy + struct refinable_stat { + typedef cds::atomicity::event_counter counter_type ; ///< Counter type + + counter_type m_nCellLockCount ; ///< Count of obtaining cell lock + counter_type m_nCellLockWaitResizing ; ///< Count of loop iteration to wait for resizing + counter_type m_nCellLockArrayChanged ; ///< Count of event "Lock array has been changed when obtaining cell lock" + counter_type m_nCellLockFailed ; ///< Count of event "Cell lock failed because of the array is owned by other thread" + + counter_type m_nSecondCellLockCount ; ///< Count of obtaining cell lock when another cell is already locked + counter_type m_nSecondCellLockFailed ; ///< Count of unsuccess obtaining cell lock when another cell is already locked + + counter_type m_nFullLockCount ; ///< Count of obtaining full lock + counter_type m_nFullLockIter ; ///< Count of unsuccessfull iteration to obtain full lock + + counter_type m_nResizeLockCount ; ///< Count of obtaining resize lock + counter_type m_nResizeLockIter ; ///< Count of unsuccessfull iteration to obtain resize lock + counter_type m_nResizeLockArrayChanged; ///< Count of event "Lock array has been changed when obtaining resize lock" + counter_type m_nResizeCount ; ///< Count of resize event + + //@cond + void onCellLock() { ++m_nCellLockCount; } + void onCellWaitResizing() { ++m_nCellLockWaitResizing; } + void onCellArrayChanged() { ++m_nCellLockArrayChanged; } + void onCellLockFailed() { ++m_nCellLockFailed; } + void onSecondCellLock() { ++m_nSecondCellLockCount; } + void onSecondCellLockFailed() { ++m_nSecondCellLockFailed; } + void onFullLock() { ++m_nFullLockCount; } + void onFullLockIter() { ++m_nFullLockIter; } + void onResizeLock() { ++m_nResizeLockCount; } + void onResizeLockIter() { ++m_nResizeLockIter; } + void onResizeLockArrayChanged() { ++m_nResizeLockArrayChanged; } + void onResize() { ++m_nResizeCount; } + //@endcond + }; + + /// Dummy internal statistics for \ref refinable mutex policy + struct empty_refinable_stat { + //@cond + void onCellLock() const {} + void onCellWaitResizing() const {} + void onCellArrayChanged() const {} + void onCellLockFailed() const {} + void onSecondCellLock() const {} + void onSecondCellLockFailed() const {} + void onFullLock() const {} + void onFullLockIter() const {} + void onResizeLock() const {} + void onResizeLockIter() const {} + void onResizeLockArrayChanged() const {} + void onResize() const {} + //@endcond + }; + + /// Refinable concurrent access policy + /** + This is one of available opt::mutex_policy option type for CuckooSet + + Refining is like a striping technique (see cuckoo::striping) + but it allows growing the size of lock array when resizing the 
hash table. + So, the sizes of hash table and lock array are equal. + + Template arguments: + - \p RecursiveLock - the type of mutex. Reentrant (recursive) mutex is required. + The default is \p cds_std::recursive_mutex. The mutex type should be default-constructible. + - \p Arity - unsigned int constant that specifies an arity. The arity is the count of hash functors, i.e., the + count of lock arrays. Default value is 2. + - \p BackOff - back-off strategy. Default is cds::backoff::yield + - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR. + - \p Stat - internal statistics type. Note that this template argument is automatically selected by \ref CuckooSet + class according to its \p opt::stat option. + */ + template < + class RecursiveLock = cds_std::recursive_mutex, + unsigned int Arity = 2, + typename BackOff = cds::backoff::yield, + class Alloc = CDS_DEFAULT_ALLOCATOR, + class Stat = empty_refinable_stat + > + class refinable + { + public: + typedef RecursiveLock lock_type ; ///< lock type + typedef Alloc allocator_type ; ///< allocator type + typedef BackOff back_off ; ///< back-off strategy + typedef Stat statistics_type ; ///< internal statistics type + static unsigned int const c_nArity = Arity; ///< the arity + + //@cond + typedef refinable_stat real_stat; + typedef empty_refinable_stat empty_stat; + + template + struct rebind_statistics { + typedef refinable< lock_type, c_nArity, back_off, allocator_type, Stat2> other; + }; + //@endcond + + protected: + //@cond + typedef cds::lock::trivial_select_policy lock_selection_policy; + + class lock_array_type + : public cds::lock::array< lock_type, lock_selection_policy, allocator_type > + , public std::enable_shared_from_this< lock_array_type > + { + typedef cds::lock::array< lock_type, lock_selection_policy, allocator_type > lock_array_base; + public: + lock_array_type( size_t nCapacity ) + : lock_array_base( nCapacity ) + {} + }; + typedef std::shared_ptr< lock_array_type > lock_array_ptr; + typedef cds::details::Allocator< lock_array_type, allocator_type > lock_array_allocator; + + typedef unsigned long long owner_t; + typedef cds::OS::ThreadId threadId_t; + + typedef cds::lock::Spin spinlock_type; + typedef cds::lock::scoped_lock< spinlock_type > scoped_spinlock; + //@endcond + + protected: + //@cond + static owner_t const c_nOwnerMask = (((owner_t) 1) << (sizeof(owner_t) * 8 - 1)) - 1; + + CDS_ATOMIC::atomic< owner_t > m_Owner ; ///< owner mark (thread id + boolean flag) + CDS_ATOMIC::atomic m_nCapacity ; ///< lock array capacity + lock_array_ptr m_arrLocks[ c_nArity ] ; ///< Lock array. The capacity of array is specified in constructor. 
+ spinlock_type m_access ; ///< access to m_arrLocks + statistics_type m_Stat ; ///< internal statistics + //@endcond + + protected: + //@cond + struct lock_array_disposer { + void operator()( lock_array_type * pArr ) + { + lock_array_allocator().Delete( pArr ); + } + }; + + lock_array_ptr create_lock_array( size_t nCapacity ) + { + return lock_array_ptr( lock_array_allocator().New( nCapacity ), lock_array_disposer() ); + } + + void acquire( size_t const * arrHash, lock_array_ptr * pLockArr, lock_type ** parrLock ) + { + owner_t me = (owner_t) cds::OS::getCurrentThreadId(); + owner_t who; + + back_off bkoff; + while ( true ) { + + { + scoped_spinlock sl(m_access); + for ( unsigned int i = 0; i < c_nArity; ++i ) + pLockArr[i] = m_arrLocks[i]; + } + + // wait while resizing + while ( true ) { + who = m_Owner.load( CDS_ATOMIC::memory_order_acquire ); + if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) ) + break; + bkoff(); + m_Stat.onCellWaitResizing(); + } + + if ( pLockArr[0] != m_arrLocks[0] ) { + m_Stat.onCellArrayChanged(); + } + else { + + size_t const nMask = pLockArr[0]->size() - 1; + assert( cds::beans::is_power2( nMask + 1 )); + + for ( unsigned int i = 0; i < c_nArity; ++i ) { + parrLock[i] = &( pLockArr[i]->at( arrHash[i] & nMask )); + parrLock[i]->lock(); + } + + who = m_Owner.load( CDS_ATOMIC::memory_order_acquire ); + if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks[0] == pLockArr[0] ) { + m_Stat.onCellLock(); + return; + } + + for ( unsigned int i = 0; i < c_nArity; ++i ) { + parrLock[i]->unlock(); + } + + m_Stat.onCellLockFailed(); + } + + // clears pLockArr can lead to calling dtor for each item of pLockArr[i] that may be a heavy-weighted operation + // (each pLockArr[i] is a shared pointer to array of a ton of mutexes) + // It is better to do this before the next loop iteration where we will use spin-locked assignment to pLockArr + // Destructing a lot of mutexes under spin-lock is a bad solution + for ( unsigned int i = 0; i < c_nArity; ++i ) + pLockArr[i].reset(); + } + } + + bool try_second_acquire( size_t const * arrHash, lock_type ** parrLock ) + { + // It is assumed that the current thread already has a lock + // and requires a second lock for other hash + + size_t const nMask = m_nCapacity.load(CDS_ATOMIC::memory_order_acquire) - 1; + size_t nCell = m_arrLocks[0]->try_lock( arrHash[0] & nMask); + if ( nCell == lock_array_type::c_nUnspecifiedCell ) { + m_Stat.onSecondCellLockFailed(); + return false; + } + parrLock[0] = &(m_arrLocks[0]->at(nCell)); + + for ( unsigned int i = 1; i < c_nArity; ++i ) { + parrLock[i] = &( m_arrLocks[i]->at( m_arrLocks[i]->lock( arrHash[i] & nMask)) ); + } + + m_Stat.onSecondCellLock(); + return true; + } + + void acquire_all() + { + owner_t me = (owner_t) cds::OS::getCurrentThreadId(); + + back_off bkoff; + while ( true ) { + owner_t ownNull = 0; + if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed )) { + m_arrLocks[0]->lock_all(); + + m_Stat.onFullLock(); + return; + } + bkoff(); + m_Stat.onFullLockIter(); + } + } + + void release_all() + { + m_arrLocks[0]->unlock_all(); + m_Owner.store( 0, CDS_ATOMIC::memory_order_release ); + } + + void acquire_resize( lock_array_ptr * pOldLocks ) + { + owner_t me = (owner_t) cds::OS::getCurrentThreadId(); + + while ( true ) { + { + scoped_spinlock sl(m_access); + for ( unsigned int i = 0; i < c_nArity; ++i ) + pOldLocks[i] = m_arrLocks[i]; + } + + // global lock + owner_t ownNull = 0; + if ( 
m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed )) { + if ( pOldLocks[0] != m_arrLocks[0] ) { + m_Owner.store( 0, CDS_ATOMIC::memory_order_release ); + m_Stat.onResizeLockArrayChanged(); + } + else { + pOldLocks[0]->lock_all(); + m_Stat.onResizeLock(); + return; + } + } + else + m_Stat.onResizeLockIter(); + + // clears pOldLocks can lead to calling dtor for each item of pOldLocks[i] that may be a heavy-weighted operation + // (each pOldLocks[i] is a shared pointer to array of a ton of mutexes) + // It is better to do this before the next loop iteration where we will use spin-locked assignment to pOldLocks + // Destructing a lot of mutexes under spin-lock is a bad solution + for ( unsigned int i = 0; i < c_nArity; ++i ) + pOldLocks[i].reset(); + } + } + + void release_resize( lock_array_ptr * pOldLocks ) + { + m_Owner.store( 0, CDS_ATOMIC::memory_order_release ); + pOldLocks[0]->unlock_all(); + } + //@endcond + + public: + //@cond + class scoped_cell_lock { + lock_type * m_arrLock[ c_nArity ]; + lock_array_ptr m_arrLockArr[ c_nArity ]; + + public: + scoped_cell_lock( refinable& policy, size_t const* arrHash ) + { + policy.acquire( arrHash, m_arrLockArr, m_arrLock ); + } + + ~scoped_cell_lock() + { + for ( unsigned int i = 0; i < c_nArity; ++i ) + m_arrLock[i]->unlock(); + } + }; + + class scoped_cell_trylock { + lock_type * m_arrLock[ c_nArity ]; + bool m_bLocked; + + public: + scoped_cell_trylock( refinable& policy, size_t const* arrHash ) + { + m_bLocked = policy.try_second_acquire( arrHash, m_arrLock ); + } + + ~scoped_cell_trylock() + { + if ( m_bLocked ) { + for ( unsigned int i = 0; i < c_nArity; ++i ) + m_arrLock[i]->unlock(); + } + } + + bool locked() const + { + return m_bLocked; + } + }; + + class scoped_full_lock { + refinable& m_policy; + public: + scoped_full_lock( refinable& policy ) + : m_policy( policy ) + { + policy.acquire_all(); + } + ~scoped_full_lock() + { + m_policy.release_all(); + } + }; + + class scoped_resize_lock + { + refinable& m_policy; + lock_array_ptr m_arrLocks[ c_nArity ]; + public: + scoped_resize_lock( refinable& policy ) + : m_policy(policy) + { + policy.acquire_resize( m_arrLocks ); + } + ~scoped_resize_lock() + { + m_policy.release_resize( m_arrLocks ); + } + }; + //@endcond + + public: + /// Constructor + refinable( + size_t nLockCount ///< The size of lock array. Must be power of two. 
+ ) : m_Owner(0) + , m_nCapacity( nLockCount ) + { + assert( cds::beans::is_power2( nLockCount )); + for ( unsigned int i = 0; i < c_nArity; ++i ) + m_arrLocks[i] = create_lock_array( nLockCount ); + } + + //@cond + void resize( size_t nCapacity ) + { + lock_array_ptr pNew[ c_nArity ]; + for ( unsigned int i = 0; i < c_nArity; ++i ) + pNew[i] = create_lock_array( nCapacity ); + + /* + // Assignment m_arrLocks[i] = pNew[i] may call heavy-weighted dtor for each item of m_arrLocks + // that is unacceptable under spin-lock + // So, we store copy of m_arrLocks in pOld + lock_array_ptr pOld[ c_nArity ]; + for ( unsigned int i = 0; i < c_nArity; ++i ) + pOld[i] = m_arrLocks[i]; + + // m_arrLocks assignment will not lead to calling dtor of each item of m_arrLocks + // since copy of m_arrLocks locates in pOld and assignment will not be too painful for spin-lock + */ + + { + scoped_spinlock sl(m_access); + for ( unsigned int i = 0; i < c_nArity; ++i ) + m_arrLocks[i] = pNew[i]; + } + m_nCapacity.store( nCapacity, CDS_ATOMIC::memory_order_release ); + + m_Stat.onResize(); + } + //@endcond + + /// Returns lock array size + /** + Lock array size is not a constant for \p refinable policy and can be changed when the set is resized. + */ + size_t lock_count() const + { + return m_nCapacity.load(CDS_ATOMIC::memory_order_relaxed); + } + + /// Returns the arity of \p refinable mutex policy + CDS_CONSTEXPR unsigned int arity() const CDS_NOEXCEPT + { + return c_nArity; + } + + /// Returns internal statistics + statistics_type const& statistics() const + { + return m_Stat; + } + }; + + /// CuckooSet internal statistics + struct stat { + typedef cds::atomicity::event_counter counter_type ; ///< Counter type + + counter_type m_nRelocateCallCount ; ///< Count of \p relocate function call + counter_type m_nRelocateRoundCount ; ///< Count of attempts to relocate items + counter_type m_nFalseRelocateCount ; ///< Count of unneeded attempts of \p relocate call + counter_type m_nSuccessRelocateCount ; ///< Count of successfull item relocating + counter_type m_nRelocateAboveThresholdCount; ///< Count of item relocating above probeset threshold + counter_type m_nFailedRelocateCount ; ///< Count of failed relocation attemp (when all probeset is full) + + counter_type m_nResizeCallCount ; ///< Count of \p resize function call + counter_type m_nFalseResizeCount ; ///< Count of false \p resize function call (when other thread has been resized the set) + counter_type m_nResizeSuccessNodeMove; ///< Count of successfull node moving when resizing + counter_type m_nResizeRelocateCall ; ///< Count of \p relocate function call from \p resize function + + counter_type m_nInsertSuccess ; ///< Count of successfull \p insert function call + counter_type m_nInsertFailed ; ///< Count of failed \p insert function call + counter_type m_nInsertResizeCount ; ///< Count of \p resize function call from \p insert + counter_type m_nInsertRelocateCount ; ///< Count of \p relocate function call from \p insert + counter_type m_nInsertRelocateFault ; ///< Count of failed \p relocate function call from \p insert + + counter_type m_nEnsureExistCount ; ///< Count of call \p ensure function for existing node + counter_type m_nEnsureSuccessCount ; ///< Count of successfull \p insert function call for new node + counter_type m_nEnsureResizeCount ; ///< Count of \p resize function call from \p ensure + counter_type m_nEnsureRelocateCount ; ///< Count of \p relocate function call from \p ensure + counter_type m_nEnsureRelocateFault ; ///< Count of failed \p 
relocate function call from \p ensure + + counter_type m_nUnlinkSuccess ; ///< Count of success \p unlink function call + counter_type m_nUnlinkFailed ; ///< Count of failed \p unlink function call + + counter_type m_nEraseSuccess ; ///< Count of success \p erase function call + counter_type m_nEraseFailed ; ///< Count of failed \p erase function call + + counter_type m_nFindSuccess ; ///< Count of success \p find function call + counter_type m_nFindFailed ; ///< Count of failed \p find function call + + counter_type m_nFindEqualSuccess ; ///< Count of success \p find_equal function call + counter_type m_nFindEqualFailed ; ///< Count of failed \p find_equal function call + + counter_type m_nFindWithSuccess ; ///< Count of success \p find_with function call + counter_type m_nFindWithFailed ; ///< Count of failed \p find_with function call + + //@cond + void onRelocateCall() { ++m_nRelocateCallCount; } + void onRelocateRound() { ++m_nRelocateRoundCount; } + void onFalseRelocateRound() { ++m_nFalseRelocateCount; } + void onSuccessRelocateRound(){ ++m_nSuccessRelocateCount; } + void onRelocateAboveThresholdRound() { ++m_nRelocateAboveThresholdCount; } + void onFailedRelocate() { ++m_nFailedRelocateCount; } + + void onResizeCall() { ++m_nResizeCallCount; } + void onFalseResizeCall() { ++m_nFalseResizeCount; } + void onResizeSuccessMove() { ++m_nResizeSuccessNodeMove; } + void onResizeRelocateCall() { ++m_nResizeRelocateCall; } + + void onInsertSuccess() { ++m_nInsertSuccess; } + void onInsertFailed() { ++m_nInsertFailed; } + void onInsertResize() { ++m_nInsertResizeCount; } + void onInsertRelocate() { ++m_nInsertRelocateCount; } + void onInsertRelocateFault() { ++m_nInsertRelocateFault; } + + void onEnsureExist() { ++m_nEnsureExistCount; } + void onEnsureSuccess() { ++m_nEnsureSuccessCount; } + void onEnsureResize() { ++m_nEnsureResizeCount; } + void onEnsureRelocate() { ++m_nEnsureRelocateCount; } + void onEnsureRelocateFault() { ++m_nEnsureRelocateFault; } + + void onUnlinkSuccess() { ++m_nUnlinkSuccess; } + void onUnlinkFailed() { ++m_nUnlinkFailed; } + + void onEraseSuccess() { ++m_nEraseSuccess; } + void onEraseFailed() { ++m_nEraseFailed; } + + void onFindSuccess() { ++m_nFindSuccess; } + void onFindFailed() { ++m_nFindFailed; } + + void onFindWithSuccess() { ++m_nFindWithSuccess; } + void onFindWithFailed() { ++m_nFindWithFailed; } + //@endcond + }; + + /// CuckooSet empty internal statistics + struct empty_stat { + //@cond + void onRelocateCall() const {} + void onRelocateRound() const {} + void onFalseRelocateRound() const {} + void onSuccessRelocateRound()const {} + void onRelocateAboveThresholdRound() const {} + void onFailedRelocate() const {} + + void onResizeCall() const {} + void onFalseResizeCall() const {} + void onResizeSuccessMove() const {} + void onResizeRelocateCall() const {} + + void onInsertSuccess() const {} + void onInsertFailed() const {} + void onInsertResize() const {} + void onInsertRelocate() const {} + void onInsertRelocateFault() const {} + + void onEnsureExist() const {} + void onEnsureSuccess() const {} + void onEnsureResize() const {} + void onEnsureRelocate() const {} + void onEnsureRelocateFault() const {} + + void onUnlinkSuccess() const {} + void onUnlinkFailed() const {} + + void onEraseSuccess() const {} + void onEraseFailed() const {} + + void onFindSuccess() const {} + void onFindFailed() const {} + + void onFindWithSuccess() const {} + void onFindWithFailed() const {} + //@endcond + }; + + /// Type traits for CuckooSet class + struct type_traits + { 
+ /// Hook used + /** + Possible values are: cuckoo::base_hook, cuckoo::member_hook, cuckoo::traits_hook. + */ + typedef base_hook<> hook; + + /// Hash functors tuple + /** + This is mandatory type and has no predefined one. + + At least, two hash functors should be provided. All hash functor + should be orthogonal (different): for each i,j: i != j => h[i](x) != h[j](x) . + The hash functors are defined as std::tuple< H1, H2, ... Hn > : + \@code cds::opt::hash< std::tuple< h1, h2 > > \@endcode + The number of hash functors specifies the number \p k - the count of hash tables in cuckoo hashing. + Up to 10 different hash functors are supported. + */ + typedef cds::opt::none hash; + + /// Concurrent access policy + /** + Available opt::mutex_policy types: + - cuckoo::striping - simple, but the lock array is not resizable + - cuckoo::refinable - resizable lock array, but more complex access to set data. + + Default is cuckoo::striping. + */ + typedef cuckoo::striping<> mutex_policy; + + /// Key equality functor + /** + Default is std::equal_to + */ + typedef opt::none equal_to; + + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// specifies binary predicate used for key comparison. + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// Item counter + /** + The type for item counting feature. + Default is cds::atomicity::item_counter + + Only atomic item counter type is allowed. + */ + typedef atomicity::item_counter item_counter; + + /// Allocator type + /** + The allocator type for allocating bucket tables. + */ + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Disposer + /** + The disposer functor is used in CuckooSet::clear member function + to free set's node. + */ + typedef intrusive::opt::v::empty_disposer disposer; + + /// Internal statistics. Available statistics: cuckoo::stat, cuckoo::empty_stat + typedef empty_stat stat; + }; + + /// Metafunction converting option list to CuckooSet traits + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + \p Options list see \ref CuckooSet. 
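[Editorial aside] A sketch of pulling these pieces together. The item type and the two hash functors are hypothetical, and the exact CuckooSet template signature is assumed here to be the value type followed by a traits class produced by cuckoo::make_traits; the option wrappers are those named in the traits above (opt::hash with a std::tuple, opt::equal_to, opt::mutex_policy):
\code
#include <string>
#include <tuple>
#include <functional>

namespace ci = cds::intrusive;

struct Item: public ci::cuckoo::node< ci::cuckoo::list, 0 >
{
    std::string m_key;
};

// Two different hash functors - cuckoo hashing needs at least two of them
struct hash1 {
    size_t operator()( Item const& i ) const { return std::hash<std::string>()( i.m_key ); }
};
struct hash2 {
    size_t operator()( Item const& i ) const
    {
        // illustration only: a second, differently mixed hash
        return std::hash<std::string>()( i.m_key ) * 2654435761u;
    }
};

struct item_equal {
    bool operator()( Item const& a, Item const& b ) const { return a.m_key == b.m_key; }
};

typedef ci::CuckooSet< Item,
    ci::cuckoo::make_traits<
        cds::opt::hash< std::tuple< hash1, hash2 > >
        ,cds::opt::equal_to< item_equal >
        ,cds::opt::mutex_policy< ci::cuckoo::striping<> >
    >::type
> item_set;
\endcode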
+ */ + template + struct make_traits { + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< cuckoo::type_traits, CDS_OPTIONS10 >::type + ,CDS_OPTIONS11 + >::type type ; ///< Result of metafunction + }; + + //@cond + namespace details { + template + class bucket_entry; + + template + class bucket_entry + { + public: + typedef Node node_type; + typedef cuckoo::list_probeset_class probeset_class; + typedef cuckoo::list probeset_type; + + protected: + node_type * pHead; + unsigned int nSize; + + public: + class iterator + { + node_type * pNode; + friend class bucket_entry; + + public: + iterator() + : pNode( null_ptr()) + {} + iterator( node_type * p ) + : pNode( p ) + {} + iterator( iterator const& it) + : pNode( it.pNode ) + {} + + iterator& operator=( iterator const& it ) + { + pNode = it.pNode; + return *this; + } + + iterator& operator=( node_type * p ) + { + pNode = p; + return *this; + } + + node_type * operator->() + { + return pNode; + } + node_type& operator*() + { + assert( pNode != null_ptr()); + return *pNode; + } + + // preinc + iterator& operator ++() + { + if ( pNode ) + pNode = pNode->m_pNext; + return *this; + } + + bool operator==(iterator const& it ) const + { + return pNode == it.pNode; + } + bool operator!=(iterator const& it ) const + { + return !( *this == it ); + } + }; + + public: + bucket_entry() + : pHead( null_ptr()) + , nSize(0) + { + static_assert(( std::is_same::value ), "Incompatible node type" ); + } + + iterator begin() + { + return iterator(pHead); + } + iterator end() + { + return iterator(); + } + + void insert_after( iterator it, node_type * p ) + { + node_type * pPrev = it.pNode; + if ( pPrev ) { + p->m_pNext = pPrev->m_pNext; + pPrev->m_pNext = p; + } + else { + // insert as head + p->m_pNext = pHead; + pHead = p; + } + ++nSize; + } + + void remove( iterator itPrev, iterator itWhat ) + { + node_type * pPrev = itPrev.pNode; + node_type * pWhat = itWhat.pNode; + assert( (!pPrev && pWhat == pHead) || (pPrev && pPrev->m_pNext == pWhat) ); + + if ( pPrev ) + pPrev->m_pNext = pWhat->m_pNext; + else { + assert( pWhat == pHead ); + pHead = pHead->m_pNext; + } + pWhat->clear(); + --nSize; + } + + void clear() + { + node_type * pNext; + for ( node_type * pNode = pHead; pNode; pNode = pNext ) { + pNext = pNode->m_pNext; + pNode->clear(); + } + + nSize = 0; + pHead = null_ptr(); + } + + template + void clear( Disposer disp ) + { + node_type * pNext; + for ( node_type * pNode = pHead; pNode; pNode = pNext ) { + pNext = pNode->m_pNext; + pNode->clear(); + cds::unref(disp)( pNode ); + } + + nSize = 0; + pHead = null_ptr(); + } + + unsigned int size() const + { + return nSize; + } + }; + + template + class bucket_entry > + { + public: + typedef Node node_type; + typedef cuckoo::vector_probeset_class probeset_class; + typedef cuckoo::vector probeset_type; + + static unsigned int const c_nCapacity = probeset_type::c_nCapacity; + + protected: + node_type * m_arrNode[c_nCapacity]; + unsigned int m_nSize; + + void shift_up( unsigned int nFrom ) + { + assert( m_nSize < c_nCapacity ); + + // std alorithm + if ( nFrom < m_nSize ) + std::copy_backward( m_arrNode + nFrom, m_arrNode + m_nSize, m_arrNode + m_nSize + 1 ); + + // alternative: low-level byte copying + //memmove( m_arrNode + nFrom + 1, m_arrNode + nFrom, (m_nSize - nFrom) * sizeof(m_arrNode[0]) ); + } + + void shift_down( node_type ** pFrom ) + { + assert( m_arrNode <= pFrom && pFrom < m_arrNode + m_nSize); + // std algo + std::copy( pFrom + 1, m_arrNode + m_nSize, pFrom ); + + // 
alternative: low-level byte copying + //memmove( pFrom + 1, pFrom, (m_nSize - nFrom - 1) * sizeof(m_arrNode[0])); + } + public: + class iterator + { + node_type ** pArr; + friend class bucket_entry; + + public: + iterator() + : pArr( null_ptr() ) + {} + iterator( node_type ** p ) + : pArr(p) + {} + iterator( iterator const& it) + : pArr( it.pArr ) + {} + + iterator& operator=( iterator const& it ) + { + pArr = it.pArr; + return *this; + } + + node_type * operator->() + { + assert( pArr != null_ptr()); + return *pArr; + } + node_type& operator*() + { + assert( pArr != null_ptr()); + assert( *pArr != null_ptr()); + return *(*pArr); + } + + // preinc + iterator& operator ++() + { + ++pArr; + return *this; + } + + bool operator==(iterator const& it ) const + { + return pArr == it.pArr; + } + bool operator!=(iterator const& it ) const + { + return !( *this == it ); + } + }; + + public: + bucket_entry() + : m_nSize(0) + { + memset( m_arrNode, 0, sizeof(m_arrNode)); + static_assert(( std::is_same::value ), "Incompatible node type" ); + } + + iterator begin() + { + return iterator(m_arrNode); + } + iterator end() + { + return iterator(m_arrNode + size()); + } + + void insert_after( iterator it, node_type * p ) + { + assert( m_nSize < c_nCapacity ); + assert( !it.pArr || (m_arrNode <= it.pArr && it.pArr <= m_arrNode + m_nSize)); + + if ( it.pArr ) { + shift_up( (unsigned int)(it.pArr - m_arrNode) + 1 ); + *(it.pArr + 1) = p; + } + else { + shift_up(0); + m_arrNode[0] = p; + } + ++m_nSize; + } + + void remove( iterator /*itPrev*/, iterator itWhat ) + { + itWhat->clear(); + shift_down( itWhat.pArr ); + --m_nSize; + } + + void clear() + { + m_nSize = 0; + } + + template + void clear( Disposer disp ) + { + for ( unsigned int i = 0; i < m_nSize; ++i ) { + cds::unref(disp)( m_arrNode[i] ); + } + m_nSize = 0; + } + + unsigned int size() const + { + return m_nSize; + } + }; + + template + struct hash_ops { + static void store( Node * pNode, size_t * pHashes ) + { + memcpy( pNode->m_arrHash, pHashes, sizeof(size_t) * ArraySize ); + } + static bool equal_to( Node& node, unsigned int nTable, size_t nHash ) + { + return node.m_arrHash[nTable] == nHash; + } + }; + template + struct hash_ops + { + static void store( Node * /*pNode*/, size_t * /*pHashes*/ ) + {} + static bool equal_to( Node& /*node*/, unsigned int /*nTable*/, size_t /*nHash*/ ) + { + return true; + } + }; + + template + struct contains; + + template + struct contains + { + template + static bool find( BucketEntry& probeset, Position& pos, unsigned int nTable, size_t nHash, Q const& val, Compare cmp ) + { + // Ordered version + typedef typename BucketEntry::iterator bucket_iterator; + + bucket_iterator itPrev; + + for ( bucket_iterator it = probeset.begin(), itEnd = probeset.end(); it != itEnd; ++it ) { + int cmpRes = cmp( *NodeTraits::to_value_ptr(*it), val ); + if ( cmpRes >= 0 ) { + pos.itFound = it; + pos.itPrev = itPrev; + return cmpRes == 0; + } + + itPrev = it; + } + + pos.itPrev = itPrev; + pos.itFound = probeset.end(); + return false; + } + }; + + template + struct contains + { + template + static bool find( BucketEntry& probeset, Position& pos, unsigned int nTable, size_t nHash, Q const& val, EqualTo eq ) + { + // Unordered version + typedef typename BucketEntry::iterator bucket_iterator; + typedef typename BucketEntry::node_type node_type; + + bucket_iterator itPrev; + + for ( bucket_iterator it = probeset.begin(), itEnd = probeset.end(); it != itEnd; ++it ) { + if ( hash_ops::equal_to( *it, nTable, nHash ) && eq( 
*NodeTraits::to_value_ptr(*it), val )) { + pos.itFound = it; + pos.itPrev = itPrev; + return true; + } + itPrev = it; + } + + pos.itPrev = itPrev; + pos.itFound = probeset.end(); + return false; + } + }; + + } // namespace details + //@endcond + + } // namespace cuckoo + + /// Cuckoo hash set + /** @ingroup cds_intrusive_map + + Source + - [2007] M.Herlihy, N.Shavit, M.Tzafrir "Concurrent Cuckoo Hashing. Technical report" + - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming" + + About Cuckoo hashing + + [From "The Art of Multiprocessor Programming"] + Cuckoo hashing is a hashing algorithm in which a newly added item displaces any earlier item + occupying the same slot. For brevity, a table is a k-entry array of items. For a hash set of size + N = 2k we use a two-entry array of tables, and two independent hash functions, + h0, h1: KeyRange -> 0,...,k-1 + mapping the set of possible keys to entries in the array. To test whether a value \p x is in the set, + find(x) tests whether either table[0][h0(x)] or table[1][h1(x)] is + equal to \p x. Similarly, erase(x) checks whether \p x is in either table[0][h0(x)] + or table[1][h1(x)], and removes it if found. + + The insert(x) method successively "kicks out" conflicting items until every key has a slot. + To add \p x, the method swaps \p x with \p y, the current occupant of table[0][h0(x)]. + If the prior value was \p NULL, it is done. Otherwise, it swaps the newly nest-less value \p y + for the current occupant of table[1][h1(y)] in the same way. As before, if the prior value + was \p NULL, it is done. Otherwise, the method continues swapping entries (alternating tables) + until it finds an empty slot. We might not find an empty slot, either because the table is full, + or because the sequence of displacements forms a cycle. We therefore need an upper limit on the + number of successive displacements we are willing to undertake. When this limit is exceeded, + we resize the hash table, choose new hash functions and start over. + + For concurrent cuckoo hashing, rather than organizing the set as a two-dimensional table of + items, we use a two-dimensional table of probe sets, where a probe set is a constant-sized set + of items with the same hash code. Each probe set holds at most \p PROBE_SIZE items, but the algorithm + tries to ensure that when the set is quiescent (i.e. no method call is in progress) each probe set + holds no more than THRESHOLD < PROBE_SIZE items. While method calls are in-flight, a probe + set may temporarily hold more than \p THRESHOLD but never more than \p PROBE_SIZE items. + + In the current implementation, a probe set can be defined either as a (singly-linked) list + or as a fixed-size vector, optionally ordered. + + In the description above, two-table cuckoo hashing (k = 2) has been considered. + We can generalize this approach for k >= 2 when we have \p k hash functions + h[0], ... h[k-1] and \p k tables table[0], ... table[k-1]. + + The search within a probe set is linear; the complexity is O(PROBE_SIZE). + The probe set may be ordered or not. An ordered probe set can be more efficient since + the average search complexity is O(PROBE_SIZE/2). + However, the overhead of sorting can eliminate the gain of ordered search. + + The probe set is ordered if opt::compare or opt::less is specified in the \p Traits template + parameter. Otherwise, the probe set is unordered and \p Traits must contain + the opt::equal_to option. + + The cds::intrusive::cuckoo namespace contains \p %CuckooSet-related declarations.
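+
+ The displacement loop described above can be illustrated with a minimal, purely sequential
+ sketch. The toy class below is given only for explanation and is not part of the library;
+ it stores non-negative \p int keys and uses -1 as the empty-slot marker:
+ \code
+ #include <vector>
+ #include <functional>
+ #include <utility>
+
+ class toy_cuckoo_set {
+     std::vector<int> m_table[2];
+
+     size_t h( unsigned int t, int x ) const
+     {
+         size_t n = std::hash<int>()( x );
+         if ( t != 0 )
+             n *= 2654435761u;    // cheap way to derive a second, "orthogonal" hash for the sketch
+         return n % m_table[t].size();
+     }
+ public:
+     explicit toy_cuckoo_set( size_t nCapacity = 16 )
+     {
+         m_table[0].assign( nCapacity, -1 );
+         m_table[1].assign( nCapacity, -1 );
+     }
+     bool contains( int x ) const
+     {
+         return m_table[0][ h(0, x) ] == x || m_table[1][ h(1, x) ] == x;
+     }
+     bool insert( int x )
+     {
+         if ( contains( x ))
+             return false;
+         for ( unsigned int nRound = 0; nRound < 32; ++nRound ) {
+             std::swap( x, m_table[0][ h(0, x) ] );  // kick out the current occupant of the slot
+             if ( x == -1 )
+                 return true;                        // the slot was empty - done
+             std::swap( x, m_table[1][ h(1, x) ] );  // relocate the displaced item to the other table
+             if ( x == -1 )
+                 return true;
+         }
+         // displacement limit exceeded (full table or a cycle);
+         // a real implementation would resize, rehash and retry instead of dropping the item
+         return false;
+     }
+ };
+ \endcode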
+ + Template arguments: + - \p T - the type stored in the set. The type must be based on cuckoo::node (for cuckoo::base_hook) + or it must have a member of type %cuckoo::node (for cuckoo::member_hook), + or it must be convertible to \p %cuckoo::node (for cuckoo::traits_hook) + - \p Traits - type traits. See cuckoo::type_traits for explanation. It is possible to declare option-based + set with cuckoo::make_traits metafunction result as \p Traits template argument. + + Template argument list \p Options... of cuckoo::make_traits metafunction are: + - intrusive::opt::hook - hook used. Possible values are: cuckoo::base_hook, cuckoo::member_hook, cuckoo::traits_hook. + If the option is not specified, %cuckoo::base_hook<> is used. + - opt::hash - hash functor tuple, mandatory option. At least, two hash functors should be provided. All hash functor + should be orthogonal (different): for each i,j: i != j => h[i](x) != h[j](x) . + The hash functors are passed as std::tuple< H1, H2, ... Hn > . The number of hash functors specifies + the number \p k - the count of hash tables in cuckoo hashing. If the compiler supports variadic templates + then k is unlimited, otherwise up to 10 different hash functors are supported. + - opt::mutex_policy - concurrent access policy. + Available policies: cuckoo::striping, cuckoo::refinable. + Default is cuckoo::striping. + - opt::equal_to - key equality functor like \p std::equal_to. + If this functor is defined then the probe-set will be unordered. + If opt::compare or opt::less option is specified too, then the probe-set will be ordered + and opt::equal_to will be ignored. + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + If opt::compare or opt::less option is specified, then the probe-set will be ordered. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + If opt::compare or opt::less option is specified, then the probe-set will be ordered. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::item_counter + The item counter should be atomic. + - opt::allocator - the allocator type using for allocating bucket tables. + Default is \p CDS_DEFAULT_ALLOCATOR + - intrusive::opt::disposer - the disposer type used in \ref clear() member function for + freeing nodes. Default is intrusive::opt::v::empty_disposer + - opt::stat - internal statistics. Possibly types: cuckoo::stat, cuckoo::empty_stat. + Default is cuckoo::empty_stat + + The probe set options cuckoo::probeset_type and cuckoo::store_hash are taken from \p node type + specified by \p opt::hook option. + + How to use + + You should incorporate cuckoo::node into your struct \p T and provide + appropriate cuckoo::type_traits::hook in your \p Traits template parameters. Usually, for \p Traits you + define a struct based on cuckoo::type_traits. + + Example for base hook and list-based probe-set: + \code + #include + + // Data stored in cuckoo set + // We use list as probe-set container and store hash values in the node + // (since we use two hash functions we should store 2 hash values per node) + struct my_data: public cds::intrusive::cuckoo::node< cds::intrusive::cuckoo::list, 2 > + { + // key field + std::string strKey; + + // other data + // ... 
+ }; + + // Provide equal_to functor for my_data since we will use unordered probe-set + struct my_data_equal_to { + bool operator()( const my_data& d1, const my_data& d2 ) const + { + return d1.strKey.compare( d2.strKey ) == 0; + } + + bool operator()( const my_data& d, const std::string& s ) const + { + return d.strKey.compare(s) == 0; + } + + bool operator()( const std::string& s, const my_data& d ) const + { + return s.compare( d.strKey ) == 0; + } + }; + + // Provide two hash functor for my_data + struct hash1 { + size_t operator()(std::string const& s) const + { + return cds::opt::v::hash( s ); + } + size_t operator()( my_data const& d ) const + { + return (*this)( d.strKey ); + } + }; + + struct hash2: private hash1 { + size_t operator()(std::string const& s) const + { + size_t h = ~( hash1::operator()(s)); + return ~h + 0x9e3779b9 + (h << 6) + (h >> 2); + } + size_t operator()( my_data const& d ) const + { + return (*this)( d.strKey ); + } + }; + + // Declare type traits + struct my_traits: public cds::intrusive::cuckoo::type_traits + { + typedef cds::intrusive::cuckoo::base_hook< + cds::intrusive::cuckoo::probeset_type< my_data::probeset_type > + ,cds::intrusive::cuckoo::store_hash< my_data::hash_array_size > + > hook; + typedef my_data_equa_to equal_to; + typedef std::tuple< hash1, hash2 > hash; + }; + + // Declare CuckooSet type + typedef cds::intrusive::CuckooSet< my_data, my_traits > my_cuckoo_set; + + // Equal option-based declaration + typedef cds::intrusive::CuckooSet< my_data, + cds::intrusive::cuckoo::make_traits< + cds::intrusive::opt::hook< cds::intrusive::cuckoo::base_hook< + cds::intrusive::cuckoo::probeset_type< my_data::probeset_type > + ,cds::intrusive::cuckoo::store_hash< my_data::hash_array_size > + > > + ,cds::opt::hash< std::tuple< hash1, hash2 > > + ,cds::opt::equal_to< my_data_equal_to > + >::type + > opt_cuckoo_set; + \endcode + + If we provide \p compare function instead of \p equal_to for \p my_data + we get as a result a cuckoo set with ordered probe set that may improve + performance. + Example for base hook and ordered vector-based probe-set: + + \code + #include + + // Data stored in cuckoo set + // We use a vector of capacity 4 as probe-set container and store hash values in the node + // (since we use two hash functions we should store 2 hash values per node) + struct my_data: public cds::intrusive::cuckoo::node< cds::intrusive::cuckoo::vector<4>, 2 > + { + // key field + std::string strKey; + + // other data + // ... 
+ }; + + // Provide compare functor for my_data since we want to use ordered probe-set + struct my_data_compare { + int operator()( const my_data& d1, const my_data& d2 ) const + { + return d1.strKey.compare( d2.strKey ); + } + + int operator()( const my_data& d, const std::string& s ) const + { + return d.strKey.compare(s); + } + + int operator()( const std::string& s, const my_data& d ) const + { + return s.compare( d.strKey ); + } + }; + + // Provide two hash functor for my_data + struct hash1 { + size_t operator()(std::string const& s) const + { + return cds::opt::v::hash( s ); + } + size_t operator()( my_data const& d ) const + { + return (*this)( d.strKey ); + } + }; + + struct hash2: private hash1 { + size_t operator()(std::string const& s) const + { + size_t h = ~( hash1::operator()(s)); + return ~h + 0x9e3779b9 + (h << 6) + (h >> 2); + } + size_t operator()( my_data const& d ) const + { + return (*this)( d.strKey ); + } + }; + + // Declare type traits + struct my_traits: public cds::intrusive::cuckoo::type_traits + { + typedef cds::intrusive::cuckoo::base_hook< + cds::intrusive::cuckoo::probeset_type< my_data::probeset_type > + ,cds::intrusive::cuckoo::store_hash< my_data::hash_array_size > + > hook; + typedef my_data_compare compare; + typedef std::tuple< hash1, hash2 > hash; + }; + + // Declare CuckooSet type + typedef cds::intrusive::CuckooSet< my_data, my_traits > my_cuckoo_set; + + // Equal option-based declaration + typedef cds::intrusive::CuckooSet< my_data, + cds::intrusive::cuckoo::make_traits< + cds::intrusive::opt::hook< cds::intrusive::cuckoo::base_hook< + cds::intrusive::cuckoo::probeset_type< my_data::probeset_type > + ,cds::intrusive::cuckoo::store_hash< my_data::hash_array_size > + > > + ,cds::opt::hash< std::tuple< hash1, hash2 > > + ,cds::opt::compare< my_data_compare > + >::type + > opt_cuckoo_set; + \endcode + + */ + template + class CuckooSet + { + public: + typedef T value_type ; ///< The value type stored in the set + typedef Traits options ; ///< Set traits + + typedef typename options::hook hook ; ///< hook type + typedef typename hook::node_type node_type ; ///< node type + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + + typedef typename options::hash hash ; ///< hash functor tuple wrapped for internal use + typedef typename hash::hash_tuple_type hash_tuple_type ; ///< Type of hash tuple + + typedef typename options::stat stat ; ///< internal statistics type + + typedef typename options::mutex_policy original_mutex_policy ; ///< Concurrent access policy, see cuckoo::type_traits::mutex_policy + + /// Actual mutex policy + /** + Actual mutex policy is built from mutex policy type provided by \p Traits template argument (see cuckoo::type_traits::mutex_policy) + but mutex policy internal statistics is conformed with cukoo::type_traits::stat type provided by \p Traits: + - if \p %cuckoo::type_traits::stat is cuckoo::empty_stat then mutex policy statistics is already empty one + - otherwise real mutex policy statistics is used + */ + typedef typename original_mutex_policy::template rebind_statistics< + typename std::conditional< + std::is_same< stat, cuckoo::empty_stat >::value + ,typename original_mutex_policy::empty_stat + ,typename original_mutex_policy::real_stat + >::type + >::other mutex_policy; + + static bool const c_isSorted = !( std::is_same< typename options::compare, opt::none >::value + && std::is_same< typename options::less, opt::none >::value ) ; ///< whether the probe set should be ordered 
+ static size_t const c_nArity = hash::size ; ///< the arity of cuckoo hashing: the number of hash functors provided; minimum 2. + + /// Key equality functor; used only for unordered probe-set + typedef typename opt::details::make_equal_to< value_type, options, !c_isSorted>::type key_equal_to; + + /// key comparing functor based on opt::compare and opt::less option setter. Used only for ordered probe set + typedef typename opt::details::make_comparator< value_type, options >::type key_comparator; + + /// allocator type + typedef typename options::allocator allocator; + + /// item counter type + typedef typename options::item_counter item_counter; + + /// node disposer + typedef typename options::disposer disposer; + + protected: + //@cond + typedef typename node_type::probeset_class probeset_class; + typedef typename node_type::probeset_type probeset_type; + static unsigned int const c_nNodeHashArraySize = node_type::hash_array_size; + + typedef typename mutex_policy::scoped_cell_lock scoped_cell_lock; + typedef typename mutex_policy::scoped_cell_trylock scoped_cell_trylock; + typedef typename mutex_policy::scoped_full_lock scoped_full_lock; + typedef typename mutex_policy::scoped_resize_lock scoped_resize_lock; + + typedef cuckoo::details::bucket_entry< node_type, probeset_type > bucket_entry; + typedef typename bucket_entry::iterator bucket_iterator; + typedef cds::details::Allocator< bucket_entry, allocator > bucket_table_allocator; + + typedef size_t hash_array[c_nArity] ; ///< hash array + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_insert_functor { + void operator()( value_type& ) + {} + }; + + struct empty_erase_functor { + void operator()( value_type const& ) + {} + }; + + struct empty_find_functor { + template + void operator()( value_type& item, Q& val ) + {} + }; +# endif + +# if !defined(CDS_CXX11_LAMBDA_SUPPORT) || ((CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL ) && _MSC_VER == 1600) + template + class disposer_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + disposer_wrapper( Disposer d): base_class(d) {} + + void operator()( node_type * pNode ) + { + base_class::get()( node_traits::to_value_ptr( pNode )); + } + }; +# endif + + struct position { + bucket_iterator itPrev; + bucket_iterator itFound; + }; + + typedef typename std::conditional< c_isSorted + , cuckoo::details::contains< node_traits, true > + , cuckoo::details::contains< node_traits, false > + >::type contains_action; + + template + struct predicate_wrapper { + typedef typename std::conditional< c_isSorted, cds::opt::details::make_comparator_from_less, Predicate>::type type; + }; + + typedef typename std::conditional< c_isSorted, key_comparator, key_equal_to >::type key_predicate; + //@endcond + + public: + static unsigned int const c_nDefaultProbesetSize = 4 ; ///< default probeset size + static size_t const c_nDefaultInitialSize = 16 ; ///< default initial size + static unsigned int const c_nRelocateLimit = c_nArity * 2 - 1 ; ///< Count of attempts to relocate before giving up + + protected: + bucket_entry * m_BucketTable[ c_nArity ] ; ///< Bucket tables + + size_t m_nBucketMask ; ///< Hash bitmask; bucket table size minus 1. 
+ unsigned int const m_nProbesetSize ; ///< Probe set size + unsigned int const m_nProbesetThreshold ; ///< Probe set threshold + + hash m_Hash ; ///< Hash functor tuple + mutex_policy m_MutexPolicy ; ///< concurrent access policy + item_counter m_ItemCounter ; ///< item counter + mutable stat m_Stat ; ///< internal statistics + + protected: + //@cond + static void check_common_constraints() + { + static_assert( (c_nArity == mutex_policy::c_nArity), "The count of hash functors must be equal to mutex_policy arity" ); + } + + void check_probeset_properties() const + { + assert( m_nProbesetThreshold < m_nProbesetSize ); + + // if probe set type is cuckoo::vector then m_nProbesetSize == N + assert( node_type::probeset_size == 0 || node_type::probeset_size == m_nProbesetSize ); + } + + template + void hashing( size_t * pHashes, Q const& v ) const + { + m_Hash( pHashes, v ); + } + + void copy_hash( size_t * pHashes, value_type const& v ) const + { + if ( c_nNodeHashArraySize ) + memcpy( pHashes, node_traits::to_node_ptr( v )->get_hash(), sizeof(pHashes[0]) * c_nNodeHashArraySize ); + else + hashing( pHashes, v ); + } + + bucket_entry& bucket( unsigned int nTable, size_t nHash ) + { + assert( nTable < c_nArity ); + return m_BucketTable[nTable][nHash & m_nBucketMask]; + } + + static void store_hash( node_type * pNode, size_t * pHashes ) + { + cuckoo::details::hash_ops< node_type, c_nNodeHashArraySize >::store( pNode, pHashes ); + //memcpy( pNode->m_arrHash, pHashes, sizeof(size_t) * c_nArity ); + } + + static bool equal_hash( node_type& node, unsigned int nTable, size_t nHash ) + { + return cuckoo::details::hash_ops< node_type, c_nNodeHashArraySize >::equal_to( node, nTable, nHash ); + } + + void allocate_bucket_tables( size_t nSize ) + { + assert( cds::beans::is_power2( nSize ) ); + + m_nBucketMask = nSize - 1; + bucket_table_allocator alloc; + for ( unsigned int i = 0; i < c_nArity; ++i ) + m_BucketTable[i] = alloc.NewArray( nSize ); + } + + static void free_bucket_tables( bucket_entry ** pTable, size_t nCapacity ) + { + bucket_table_allocator alloc; + for ( unsigned int i = 0; i < c_nArity; ++i ) { + alloc.Delete( pTable[i], nCapacity ); + pTable[i] = null_ptr(); + } + } + void free_bucket_tables() + { + free_bucket_tables( m_BucketTable, m_nBucketMask + 1 ); + } + + static unsigned int const c_nUndefTable = (unsigned int) -1; + template + unsigned int contains( position * arrPos, size_t * arrHash, Q const& val, Predicate pred ) + { + // Buckets must be locked + + for ( unsigned int i = 0; i < c_nArity; ++i ) { + bucket_entry& probeset = bucket( i, arrHash[i] ); + if ( contains_action::find( probeset, arrPos[i], i, arrHash[i], val, pred )) + return i; + } + return c_nUndefTable; + } + + template + value_type * erase_( Q const& val, Predicate pred, Func f ) + { + hash_array arrHash; + hashing( arrHash, val ); + position arrPos[ c_nArity ]; + + { + scoped_cell_lock guard( m_MutexPolicy, arrHash ); + + unsigned int nTable = contains( arrPos, arrHash, val, pred ); + if ( nTable != c_nUndefTable ) { + node_type& node = *arrPos[nTable].itFound; + cds::unref(f)( *node_traits::to_value_ptr(node) ); + bucket( nTable, arrHash[nTable]).remove( arrPos[nTable].itPrev, arrPos[nTable].itFound ); + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return node_traits::to_value_ptr( node ); + } + } + + m_Stat.onEraseFailed(); + return null_ptr(); + } + + template + bool find_( Q& val, Predicate pred, Func f ) + { + hash_array arrHash; + position arrPos[ c_nArity ]; + hashing( arrHash, val ); + scoped_cell_lock sl( 
m_MutexPolicy, arrHash ); + + unsigned int nTable = contains( arrPos, arrHash, val, pred ); + if ( nTable != c_nUndefTable ) { + cds::unref(f)( *node_traits::to_value_ptr( *arrPos[nTable].itFound ), val ); + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + + bool relocate( unsigned int nTable, size_t * arrGoalHash ) + { + // arrGoalHash contains hash values for relocating element + // Relocating element is first one from bucket( nTable, arrGoalHash[nTable] ) probeset + + m_Stat.onRelocateCall(); + + hash_array arrHash; + value_type * pVal; + for ( unsigned int nRound = 0; nRound < c_nRelocateLimit; ++nRound ) { + m_Stat.onRelocateRound(); + + while ( true ) { + scoped_cell_lock guard( m_MutexPolicy, arrGoalHash ); + + bucket_entry& refBucket = bucket( nTable, arrGoalHash[nTable] ); + if ( refBucket.size() < m_nProbesetThreshold ) { + // probeset is not above the threshold + m_Stat.onFalseRelocateRound(); + return true; + } + + pVal = node_traits::to_value_ptr( *refBucket.begin() ); + copy_hash( arrHash, *pVal ); + + scoped_cell_trylock guard2( m_MutexPolicy, arrHash ); + if ( !guard2.locked() ) + continue ; // try one more time + + refBucket.remove( typename bucket_entry::iterator(), refBucket.begin() ); + + unsigned int i = (nTable + 1) % c_nArity; + + // try insert into free probeset + while ( i != nTable ) { + bucket_entry& bkt = bucket( i, arrHash[i] ); + if ( bkt.size() < m_nProbesetThreshold ) { + position pos; + contains_action::find( bkt, pos, i, arrHash[i], *pVal, key_predicate() ) ; // must return false! + bkt.insert_after( pos.itPrev, node_traits::to_node_ptr( pVal )); + m_Stat.onSuccessRelocateRound(); + return true; + } + i = ( i + 1 ) % c_nArity; + } + + // try insert into partial probeset + i = (nTable + 1) % c_nArity; + while ( i != nTable ) { + bucket_entry& bkt = bucket( i, arrHash[i] ); + if ( bkt.size() < m_nProbesetSize ) { + position pos; + contains_action::find( bkt, pos, i, arrHash[i], *pVal, key_predicate() ) ; // must return false! 
+ bkt.insert_after( pos.itPrev, node_traits::to_node_ptr( pVal )); + nTable = i; + memcpy( arrGoalHash, arrHash, sizeof(arrHash)); + m_Stat.onRelocateAboveThresholdRound(); + goto next_iteration; + } + i = (i + 1) % c_nArity; + } + + // all probeset is full, relocating fault + refBucket.insert_after( typename bucket_entry::iterator(), node_traits::to_node_ptr( pVal )); + m_Stat.onFailedRelocate(); + return false; + } + + next_iteration:; + } + return false; + } + + void resize() + { + m_Stat.onResizeCall(); + + size_t nOldCapacity = bucket_count(); + bucket_entry * pOldTable[ c_nArity ]; + { + scoped_resize_lock guard( m_MutexPolicy ); + + if ( nOldCapacity != bucket_count() ) { + m_Stat.onFalseResizeCall(); + return; + } + + size_t nCapacity = nOldCapacity * 2; + + m_MutexPolicy.resize( nCapacity ); + memcpy( pOldTable, m_BucketTable, sizeof(pOldTable)); + allocate_bucket_tables( nCapacity ); + + typedef typename bucket_entry::iterator bucket_iterator; + hash_array arrHash; + position arrPos[ c_nArity ]; + + for ( unsigned int nTable = 0; nTable < c_nArity; ++nTable ) { + bucket_entry * pTable = pOldTable[nTable]; + for ( size_t k = 0; k < nOldCapacity; ++k ) { + bucket_iterator itNext; + for ( bucket_iterator it = pTable[k].begin(), itEnd = pTable[k].end(); it != itEnd; it = itNext ) { + itNext = it; + ++itNext; + + value_type& val = *node_traits::to_value_ptr( *it ); + copy_hash( arrHash, val ); + contains( arrPos, arrHash, val, key_predicate() ) ; // must return c_nUndefTable + + for ( unsigned int i = 0; i < c_nArity; ++i ) { + bucket_entry& refBucket = bucket( i, arrHash[i] ); + if ( refBucket.size() < m_nProbesetThreshold ) { + refBucket.insert_after( arrPos[i].itPrev, &*it ); + m_Stat.onResizeSuccessMove(); + goto do_next; + } + } + + for ( unsigned int i = 0; i < c_nArity; ++i ) { + bucket_entry& refBucket = bucket( i, arrHash[i] ); + if ( refBucket.size() < m_nProbesetSize ) { + refBucket.insert_after( arrPos[i].itPrev, &*it ); + assert( refBucket.size() > 1 ); + copy_hash( arrHash, *node_traits::to_value_ptr( *refBucket.begin()) ); + m_Stat.onResizeRelocateCall(); + relocate( i, arrHash ); + break; + } + } + do_next:; + } + } + } + } + free_bucket_tables( pOldTable, nOldCapacity ); + } + + CDS_CONSTEXPR static unsigned int calc_probeset_size( unsigned int nProbesetSize ) CDS_NOEXCEPT + { + return nProbesetSize + ? nProbesetSize + : ( node_type::probeset_size ? node_type::probeset_size : c_nDefaultProbesetSize ) +; + } + //@endcond + + public: + /// Default constructor + /** + Initial size = \ref c_nDefaultInitialSize + + Probe set size: + - \p c_nDefaultProbesetSize if \p probeset_type is \p cuckoo::list + - \p Capacity if \p probeset_type is cuckoo::vector + + Probe set threshold = probe set size - 1 + */ + CuckooSet() + : m_nProbesetSize( calc_probeset_size(0) ) + , m_nProbesetThreshold( m_nProbesetSize - 1 ) + , m_MutexPolicy( c_nDefaultInitialSize ) + { + check_common_constraints(); + check_probeset_properties(); + + allocate_bucket_tables( c_nDefaultInitialSize ); + } + + /// Constructs the set object with given probe set size and threshold + /** + If probe set type is cuckoo::vector vector + then \p nProbesetSize should be equal to vector's \p Capacity. + */ + CuckooSet( + size_t nInitialSize ///< Initial set size; if 0 - use default initial size \ref c_nDefaultInitialSize + , unsigned int nProbesetSize ///< probe set size + , unsigned int nProbesetThreshold = 0 ///< probe set threshold, nProbesetThreshold < nProbesetSize. 
If 0, nProbesetThreshold = nProbesetSize - 1 + ) + : m_nProbesetSize( calc_probeset_size(nProbesetSize) ) + , m_nProbesetThreshold( nProbesetThreshold ? nProbesetThreshold : m_nProbesetSize - 1 ) + , m_MutexPolicy( cds::beans::ceil2(nInitialSize ? nInitialSize : c_nDefaultInitialSize )) + { + check_common_constraints(); + check_probeset_properties(); + + allocate_bucket_tables( nInitialSize ? cds::beans::ceil2( nInitialSize ) : c_nDefaultInitialSize ); + } + + /// Constructs the set object with given hash functor tuple + /** + The probe set size and threshold are set as default, see CuckooSet() + */ + CuckooSet( + hash_tuple_type const& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : m_nProbesetSize( calc_probeset_size(0) ) + , m_nProbesetThreshold( m_nProbesetSize -1 ) + , m_Hash( h ) + , m_MutexPolicy( c_nDefaultInitialSize ) + { + check_common_constraints(); + check_probeset_properties(); + + allocate_bucket_tables( c_nDefaultInitialSize ); + } + + /// Constructs the set object with given probe set properties and hash functor tuple + /** + If probe set type is cuckoo::vector vector + then \p nProbesetSize should be equal to vector's \p Capacity. + */ + CuckooSet( + size_t nInitialSize ///< Initial set size; if 0 - use default initial size \ref c_nDefaultInitialSize + , unsigned int nProbesetSize ///< probe set size, positive integer + , unsigned int nProbesetThreshold ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 + , hash_tuple_type const& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : m_nProbesetSize( calc_probeset_size(nProbesetSize) ) + , m_nProbesetThreshold( nProbesetThreshold ? nProbesetThreshold : m_nProbesetSize - 1) + , m_Hash( h ) + , m_MutexPolicy( cds::beans::ceil2(nInitialSize ? nInitialSize : c_nDefaultInitialSize )) + { + check_common_constraints(); + check_probeset_properties(); + + allocate_bucket_tables( nInitialSize ? cds::beans::ceil2( nInitialSize ) : c_nDefaultInitialSize ); + } + +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + /// Constructs the set object with given hash functor tuple (move semantics) + /** + The probe set size and threshold are set as default, see CuckooSet() + */ + CuckooSet( + hash_tuple_type&& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : m_nProbesetSize( calc_probeset_size(0) ) + , m_nProbesetThreshold( m_nProbesetSize / 2 ) + , m_Hash( std::forward(h) ) + , m_MutexPolicy( c_nDefaultInitialSize ) + { + check_common_constraints(); + check_probeset_properties(); + + allocate_bucket_tables( c_nDefaultInitialSize ); + } + + /// Constructs the set object with given probe set properties and hash functor tuple (move semantics) + /** + If probe set type is cuckoo::vector vector + then \p nProbesetSize should be equal to vector's \p Capacity. + */ + CuckooSet( + size_t nInitialSize ///< Initial set size; if 0 - use default initial size \ref c_nDefaultInitialSize + , unsigned int nProbesetSize ///< probe set size, positive integer + , unsigned int nProbesetThreshold ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 + , hash_tuple_type&& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : m_nProbesetSize( calc_probeset_size(nProbesetSize) ) + , m_nProbesetThreshold( nProbesetThreshold ? nProbesetThreshold : m_nProbesetSize - 1) + , m_Hash( std::forward(h) ) + , m_MutexPolicy( cds::beans::ceil2(nInitialSize ? 
nInitialSize : c_nDefaultInitialSize )) + { + check_common_constraints(); + check_probeset_properties(); + + allocate_bucket_tables( nInitialSize ? cds::beans::ceil2( nInitialSize ) : c_nDefaultInitialSize ); + } +# endif // ifdef CDS_MOVE_SEMANTICS_SUPPORT + + /// Destructor + ~CuckooSet() + { + free_bucket_tables(); + } + + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return insert( val, []( value_type& ) {} ); +# else + return insert( val, empty_insert_functor() ); +# endif + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. + + The user-defined functor is called only if the inserting is success and can be passed by reference + using boost::ref + */ + template + bool insert( value_type& val, Func f ) + { + hash_array arrHash; + position arrPos[ c_nArity ]; + unsigned int nGoalTable; + + hashing( arrHash, val ); + node_type * pNode = node_traits::to_node_ptr( val ); + store_hash( pNode, arrHash ); + + while (true) { + { + scoped_cell_lock guard( m_MutexPolicy, arrHash ); + + if ( contains( arrPos, arrHash, val, key_predicate() ) != c_nUndefTable ) { + m_Stat.onInsertFailed(); + return false; + } + + for ( unsigned int i = 0; i < c_nArity; ++i ) { + bucket_entry& refBucket = bucket( i, arrHash[i] ); + if ( refBucket.size() < m_nProbesetThreshold ) { + refBucket.insert_after( arrPos[i].itPrev, pNode ); + cds::unref(f)( val ); + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + return true; + } + } + + for ( unsigned int i = 0; i < c_nArity; ++i ) { + bucket_entry& refBucket = bucket( i, arrHash[i] ); + if ( refBucket.size() < m_nProbesetSize ) { + refBucket.insert_after( arrPos[i].itPrev, pNode ); + cds::unref(f)( val ); + ++m_ItemCounter; + nGoalTable = i; + assert( refBucket.size() > 1 ); + copy_hash( arrHash, *node_traits::to_value_ptr( *refBucket.begin()) ); + goto do_relocate; + } + } + } + + m_Stat.onInsertResize(); + resize(); + } + + do_relocate: + m_Stat.onInsertRelocate(); + if ( !relocate( nGoalTable, arrHash )) { + m_Stat.onInsertRelocateFault(); + m_Stat.onInsertResize(); + resize(); + } + + m_Stat.onInsertSuccess(); + return true; + } + + /// Ensures that the \p val exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the set, then \p val is inserted into the set. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refers to the same thing. + + The functor may change non-key fields of the \p item. + + You may pass \p func argument by reference using boost::ref or cds::ref. 
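+
+ For illustration only, a minimal sketch of an \p ensure functor, assuming the hypothetical
+ \p my_data type and \p theSet instance from the examples above:
+ \code
+ struct ensure_functor {
+     void operator()( bool bNew, my_data& item, my_data& val )
+     {
+         if ( !bNew ) {
+             // item is the element already stored in the set, val is the argument of ensure();
+             // non-key fields of item may be updated from val here
+         }
+     }
+ };
+ // usage: std::pair<bool, bool> res = theSet.ensure( myItem, ensure_functor() );
+ \endcode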
+ + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the set. + */ + template + std::pair ensure( value_type& val, Func func ) + { + hash_array arrHash; + position arrPos[ c_nArity ]; + unsigned int nGoalTable; + + hashing( arrHash, val ); + node_type * pNode = node_traits::to_node_ptr( val ); + store_hash( pNode, arrHash ); + + while (true) { + { + scoped_cell_lock guard( m_MutexPolicy, arrHash ); + + unsigned int nTable = contains( arrPos, arrHash, val, key_predicate() ); + if ( nTable != c_nUndefTable ) { + cds::unref(func)( false, *node_traits::to_value_ptr( *arrPos[nTable].itFound ), val ); + m_Stat.onEnsureExist(); + return std::make_pair( true, false ); + } + + node_type * pNode = node_traits::to_node_ptr( val ); + store_hash( pNode, arrHash ); + + for ( unsigned int i = 0; i < c_nArity; ++i ) { + bucket_entry& refBucket = bucket( i, arrHash[i] ); + if ( refBucket.size() < m_nProbesetThreshold ) { + refBucket.insert_after( arrPos[i].itPrev, pNode ); + cds::unref(func)( true, val, val ); + ++m_ItemCounter; + m_Stat.onEnsureSuccess(); + return std::make_pair( true, true ); + } + } + + for ( unsigned int i = 0; i < c_nArity; ++i ) { + bucket_entry& refBucket = bucket( i, arrHash[i] ); + if ( refBucket.size() < m_nProbesetSize ) { + refBucket.insert_after( arrPos[i].itPrev, pNode ); + cds::unref(func)( true, val, val ); + ++m_ItemCounter; + nGoalTable = i; + assert( refBucket.size() > 1 ); + copy_hash( arrHash, *node_traits::to_value_ptr( *refBucket.begin()) ); + goto do_relocate; + } + } + } + + m_Stat.onEnsureResize(); + resize(); + } + + do_relocate: + m_Stat.onEnsureRelocate(); + if ( !relocate( nGoalTable, arrHash )) { + m_Stat.onEnsureRelocateFault(); + m_Stat.onEnsureResize(); + resize(); + } + + m_Stat.onEnsureSuccess(); + return std::make_pair( true, true ); + } + + /// Unlink the item \p val from the set + /** + The function searches the item \p val in the set and unlink it + if it is found and is equal to \p val (here, the equality means that + \p val belongs to the set: if \p item is an item found then + unlink is successful iif &val == &item) + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + hash_array arrHash; + hashing( arrHash, val ); + position arrPos[ c_nArity ]; + + { + scoped_cell_lock guard( m_MutexPolicy, arrHash ); + + unsigned int nTable = contains( arrPos, arrHash, val, key_predicate() ); + if ( nTable != c_nUndefTable && node_traits::to_value_ptr(*arrPos[nTable].itFound) == &val ) { + bucket( nTable, arrHash[nTable]).remove( arrPos[nTable].itPrev, arrPos[nTable].itFound ); + --m_ItemCounter; + m_Stat.onUnlinkSuccess(); + return true; + } + } + + m_Stat.onUnlinkFailed(); + return false; + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_CuckooSet_erase + The function searches an item with key equal to \p val in the set, + unlinks it from the set, and returns a pointer to unlinked item. + + If the item with key equal to \p val is not found the function return \p NULL. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. 
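+
+ A minimal usage sketch, assuming the hypothetical \p my_data type, its string-accepting
+ hash functors from the examples above, and a set instance \p theSet:
+ \code
+ my_data * p = theSet.erase( std::string("apple") );
+ if ( p ) {
+     // p points to the unlinked item; the caller owns it now
+     delete p;   // assuming the item was allocated with new
+ }
+ \endcode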
+ */ + template + value_type * erase( Q const& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase( val, [](value_type const&) {} ); +# else + return erase( val, empty_erase_functor() ); +# endif + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_CuckooSet_erase "erase(Q const&)" + but \p pred is used for key comparing. + If cuckoo set is ordered, then \p Predicate should have the interface and semantics like \p std::less. + If cuckoo set is unordered, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p Predicate must imply the same element order as the comparator used for building the set. + */ + template + value_type * erase_with( Q const& val, Predicate pred ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_( val, typename predicate_wrapper::type(), [](value_type const&) {} ); +# else + return erase_( val, typename predicate_wrapper::type(), empty_erase_functor() ); +# endif + } + + /// Delete the item from the set + /** \anchor cds_intrusive_CuckooSet_erase_func + The function searches an item with key equal to \p val in the set, + call \p f functor with item found, unlinks it from the set, and returns a pointer to unlinked item. + + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + The functor may be passed by reference with boost:ref + + If the item with key equal to \p val is not found the function return \p NULL. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + value_type * erase( Q const& val, Func f ) + { + return erase_( val, key_predicate(), f ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_CuckooSet_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + If you use ordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::less. + If you use unordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p Predicate must imply the same element order as the comparator used for building the set. + */ + template + value_type * erase_with( Q const& val, Predicate pred, Func f ) + { + return erase_( val, typename predicate_wrapper::type(), f ); + } + + /// Find the key \p val + /** \anchor cds_intrusive_CuckooSet_find_func + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. 
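+
+ For illustration only, a minimal sketch of a \p find functor, assuming the hypothetical
+ \p my_data type and set instance \p theSet from the examples above:
+ \code
+ struct find_functor {
+     void operator()( my_data& item, std::string& key )
+     {
+         // item is the element found; non-key fields may be inspected or updated here
+     }
+ };
+
+ std::string key( "apple" );
+ bool bFound = theSet.find( key, find_functor() );
+ \endcode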
+ */ + template + bool find( Q& val, Func f ) + { + return find_( val, key_predicate(), f ); + } + + /// Find the key \p val using \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_CuckooSet_find_func "find(Q&, Func)" + but \p pred is used for key comparison. + If you use ordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::less. + If you use unordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Predicate pred, Func f ) + { + return find_( val, typename predicate_wrapper::type(), f ); + } + + /// Find the key \p val + /** \anchor cds_intrusive_CuckooSet_find_cfunc + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return find_( val, key_predicate(), f ); + } + + /// Find the key \p val using \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_CuckooSet_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparison. + If you use ordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::less. + If you use unordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Predicate pred, Func f ) + { + return find_( val, typename predicate_wrapper::type(), f ); + } + + /// Find the key \p val + /** \anchor cds_intrusive_CuckooSet_find_val + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool find( Q const& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return find( val, [](value_type&, Q const& ) {} ); +# else + return find( val, empty_find_functor() ); +# endif + } + + /// Find the key \p val using \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_CuckooSet_find_val "find(Q const&)" + but \p pred is used for key comparison. + If you use ordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::less. + If you use unordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p pred must imply the same element order as the comparator used for building the set. 
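+
+ For an unordered cuckoo set, a light-weight equality predicate might look as follows
+ (a sketch assuming the hypothetical \p my_data type and set instance \p theSet from
+ the examples above):
+ \code
+ struct key_equal_to {
+     bool operator()( my_data const& d, std::string const& s ) const { return d.strKey == s; }
+     bool operator()( std::string const& s, my_data const& d ) const { return d.strKey == s; }
+     bool operator()( my_data const& d1, my_data const& d2 ) const  { return d1.strKey == d2.strKey; }
+ };
+
+ std::string key( "apple" );
+ bool bFound = theSet.find_with( key, key_equal_to() );
+ \endcode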
+ */ + template + bool find_with( Q const& val, Predicate pred ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return find_with( val, typename predicate_wrapper::type(), [](value_type& , Q const& ) {} ); +# else + return find_with( val, typename predicate_wrapper::type(), empty_find_functor() ); +# endif + } + + /// Clears the set + /** + The function unlinks all items from the set. + For any item \ref disposer is called + */ + void clear() + { + clear_and_dispose( disposer() ); + } + + /// Clears the set and calls \p disposer for each item + /** + The function unlinks all items from the set calling \p disposer for each item. + \p Disposer functor interface is: + \code + struct Disposer{ + void operator()( value_type * p ); + }; + \endcode + + The \ref disposer specified in \p Traits options is not called. + */ + template + void clear_and_dispose( Disposer oDisposer ) + { + // locks entire array + scoped_full_lock sl( m_MutexPolicy ); + +# if !defined(CDS_CXX11_LAMBDA_SUPPORT) || ((CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL ) && _MSC_VER == 1600) + disposer_wrapper disp( oDisposer ); +# endif + for ( unsigned int i = 0; i < c_nArity; ++i ) { + bucket_entry * pEntry = m_BucketTable[i]; + bucket_entry * pEnd = pEntry + m_nBucketMask + 1; + for ( ; pEntry != pEnd ; ++pEntry ) { +# if defined(CDS_CXX11_LAMBDA_SUPPORT) && !((CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL) && _MSC_VER == 1600) + // MSVC 10: error to call nested typedefs node_traits from lambda + pEntry->clear( [&oDisposer]( node_type * pNode ){ oDisposer( node_traits::to_value_ptr( pNode )) ; } ); +# else + pEntry->clear( cds::ref(disp) ); +# endif + } + } + m_ItemCounter.reset(); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + size_t size() const + { + return m_ItemCounter; + } + + /// Returns the size of hash table + /** + The hash table size is non-constant and can be increased via resizing. + */ + size_t bucket_count() const + { + return m_nBucketMask + 1; + } + + /// Returns lock array size + size_t lock_count() const + { + return m_MutexPolicy.lock_count(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + /// Returns const reference to mutex policy internal statistics + typename mutex_policy::statistics_type const& mutex_policy_statistics() const + { + return m_MutexPolicy.statistics(); + } + }; +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_CUCKOO_SET_H diff --git a/cds/intrusive/deque_stat.h b/cds/intrusive/deque_stat.h new file mode 100644 index 00000000..7146542d --- /dev/null +++ b/cds/intrusive/deque_stat.h @@ -0,0 +1,82 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_DEQUE_STAT_H +#define __CDS_INTRUSIVE_DEQUE_STAT_H + +#include + +namespace cds { namespace intrusive { + + /// Deque internal statistics. May be used for debugging or profiling + /** @ingroup cds_intrusive_helper + Template argument \p Counter defines type of counter. + Default is cds::atomics::event_counter. + You may use other counter type like as cds::atomics::item_counter, + or even integral type, for example, \p int. 
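+
+ A minimal sketch of how a container implementation might drive these hooks, assuming the
+ default counter type (the event-registering members are listed in the struct below):
+ \code
+ cds::intrusive::deque_stat<> stat;
+ stat.onPushBack();    // registers one push_back event
+ stat.onPopEmpty();    // registers one pop from an empty deque
+ \endcode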
+ */ + template + struct deque_stat + { + typedef Counter counter_type ; ///< Counter type + + counter_type m_PushFrontCount ; ///< push front event count + counter_type m_PushBackCount ; ///< push back event count + counter_type m_PopFrontCount ; ///< pop front event count + counter_type m_PopBackCount ; ///< pop back event count + counter_type m_PopEmptyCount ; ///< pop from empty deque event count + counter_type m_PushFrontContentionCount ; ///< \p push_front contention count + counter_type m_PushBackContentionCount ; ///< \p push_back contention count + counter_type m_PopFrontContentionCount ; ///< \p pop_front contention count + counter_type m_PopBackContentionCount ; ///< \p pop_back contention count + + /// Register \p push_front call + void onPushFront() { ++m_PushFrontCount; } + + /// Register \p push_back call + void onPushBack() { ++m_PushBackCount; } + + /// Register \p pop_front call + void onPopFront() { ++m_PopFrontCount; } + + /// Register \p pop_back call + void onPopBack() { ++m_PopBackCount; } + + /// Register popping from empty deque + void onPopEmpty() { ++m_PopEmptyCount; } + + /// Register "\p push_front contention" event + void onPushFrontContention() { ++m_PushFrontContentionCount; } + + /// Register "\p push_back contention" event + void onPushBackContention() { ++m_PushBackContentionCount; } + + /// Register "\p pop_front contention" event + void onPopFrontContention() { ++m_PopFrontContentionCount; } + + /// Register "\p pop_back contention" event + void onPopBackContention() { ++m_PopBackContentionCount; } + }; + + + /// Dummy deque statistics - no counting is performed. Support interface like \ref deque_stat + /** @ingroup cds_intrusive_helper + */ + struct deque_dummy_stat + { + //@cond + void onPushFront() {} + void onPushBack() {} + void onPopFront() {} + void onPopBack() {} + void onPopEmpty() {} + void onPushFrontContention() {} + void onPushBackContention() {} + void onPopFrontContention() {} + void onPopBackContention() {} + //@endcond + }; + +}} // namespace cds::intrusive + + +#endif // #ifndef __CDS_INTRUSIVE_DEQUE_STAT_H diff --git a/cds/intrusive/details/dummy_node_holder.h b/cds/intrusive/details/dummy_node_holder.h new file mode 100644 index 00000000..5d4120a0 --- /dev/null +++ b/cds/intrusive/details/dummy_node_holder.h @@ -0,0 +1,67 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_DETAILS_DUMMY_NODE_HOLDER_H +#define __CDS_INTRUSIVE_DETAILS_DUMMY_NODE_HOLDER_H + +#include +#include + +//@cond +namespace cds { namespace intrusive { namespace details { + + template + class dummy_node: public Node + { + typedef Node node_type; + + node_type m_Dummy; + public: + node_type * get() + { + return &m_Dummy; + } + + void retire() + {} + }; + + template + class dummy_node< cds::gc::HRC, Node, Alloc > + { + typedef Node node_type; + typedef cds::gc::HRC gc; + typedef cds::details::Allocator< node_type, Alloc> allocator_type; + + node_type * m_pDummy; + + struct dummy_node_disposer { + void operator()( node_type * p ) + { + assert( p != null_ptr()); + + p->m_pNext.store( null_ptr(), CDS_ATOMIC::memory_order_release ); + allocator_type().Delete( p ); + } + }; + + public: + dummy_node() + : m_pDummy( allocator_type().New() ) + {} + + node_type * get() + { + return m_pDummy; + } + + void retire() + { + gc::template retire( m_pDummy ); + } + }; + +}}} // namepace cds::intrusive::details + +//@endcond + +#endif // #ifndef __CDS_INTRUSIVE_DETAILS_DUMMY_NODE_HOLDER_H diff --git a/cds/intrusive/details/ellen_bintree_base.h 
b/cds/intrusive/details/ellen_bintree_base.h new file mode 100644 index 00000000..d24cb7b7 --- /dev/null +++ b/cds/intrusive/details/ellen_bintree_base.h @@ -0,0 +1,688 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_DETAILS_ELLEN_BINTREE_BASE_H +#define __CDS_INTRUSIVE_DETAILS_ELLEN_BINTREE_BASE_H + +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// EllenBinTree related declarations + namespace ellen_bintree { + + //Forwards + template struct base_node; + template struct node; + template struct internal_node; + + /// Update descriptor + /** + Update descriptor is used internally for helping concurrent threads + to complete modifying operation. + Usually, you should not use \p update_desc type directly until + you want to develop special free-list of update descriptor. + + Template parameters: + - \p LeafNode - leaf node type, see \ref node + - \p InternalNode - internal node type, see \ref internal_node + + @note Size of update descriptor is constant. + It does not depends of template arguments. + */ + template + struct update_desc { + //@cond + typedef LeafNode leaf_node; + typedef InternalNode internal_node; + + typedef cds::details::marked_ptr< update_desc, 3 > update_ptr; + + enum { + Clean = 0, + DFlag = 1, + IFlag = 2, + Mark = 3 + }; + + struct insert_info { + internal_node * pParent; + internal_node * pNew; + leaf_node * pLeaf; + bool bRightLeaf; + }; + struct delete_info { + internal_node * pGrandParent; + internal_node * pParent; + leaf_node * pLeaf; + update_desc * pUpdateParent; + bool bDisposeLeaf; // true if pLeaf should be disposed, false otherwise (for extract operation, RCU) + bool bRightParent; + bool bRightLeaf; + }; + + union { + insert_info iInfo; + delete_info dInfo; + }; + + update_desc * pNextRetire ; // for local retired list (RCU) + + update_desc() + : pNextRetire( null_ptr() ) + {} + //@endcond + }; + + //@cond + struct basic_node + { + enum flags { + internal = 1, ///< set for internal node + key_infinite1 = 2, ///< set if node's key is Inf1 + key_infinite2 = 4, ///< set if node's key is Inf2 + + key_infinite = key_infinite1 | key_infinite2 ///< Cumulative infinite flags + }; + + unsigned int m_nFlags ; ///< Internal flags + + /// Constructs leaf (bIntrenal == false) or internal (bInternal == true) node + explicit basic_node( bool bInternal ) + : m_nFlags( bInternal ? internal : 0 ) + {} + + /// Checks if the node is a leaf + bool is_leaf() const + { + return !is_internal(); + } + + /// Checks if the node is internal + bool is_internal() const + { + return (m_nFlags & internal) != 0; + } + + /// Returns infinite key, 0 if the node is not infinite + unsigned int infinite_key() const + { + return m_nFlags & key_infinite; + } + + /// Sets infinite key for the node (for internal use only!!!) 
+ void infinite_key( int nInf ) + { + m_nFlags &= ~key_infinite; + switch ( nInf ) { + case 1: + m_nFlags |= key_infinite1; + break; + case 2: + m_nFlags |= key_infinite2; + break; + case 0: + break; + default: + assert( false ); + break; + } + } + }; + + template + struct base_node: public basic_node + { + typedef basic_node base_class; + + typedef GC gc ; ///< Garbage collector + /// Constructs leaf (bIntrenal == false) or internal (bInternal == true) node + explicit base_node( bool bInternal ) + : base_class( bInternal ) + {} + }; + //@endcond + + /// Ellen's binary tree leaf node + /** + Template parameters: + - \p GC - one of \ref cds_garbage_collector "garbage collector type" + - \p Tag - a tag used to distinguish between different implementation. An incomplete type may be used as a tag. + */ + template + struct node +# ifndef CDS_DOXYGEN_INVOKED + : public base_node< GC > +# endif + { + //@cond + typedef base_node< GC > base_class; + //@endcond + + typedef GC gc ; ///< Garbage collector type + typedef Tag tag ; ///< Tag + + /// Default ctor + node() + : base_class( false ) + {} + }; + + /// Ellen's binary tree internal node + /** + Template arguments: + - \p Key - key type + - \p LeafNode - leaf node type + */ + template + struct internal_node +# ifndef CDS_DOXYGEN_INVOKED + : public base_node +# endif + { + //@cond + typedef base_node base_class; + //@endcond + + typedef Key key_type ; ///< key type + typedef LeafNode leaf_node ; ///< type of leaf node + typedef update_desc< leaf_node, internal_node > update_desc_type; ///< Update descriptor + typedef typename update_desc_type::update_ptr update_ptr ; ///< Marked pointer to update descriptor + + key_type m_Key ; ///< Regular key + CDS_ATOMIC::atomic m_pLeft ; ///< Left subtree + CDS_ATOMIC::atomic m_pRight ; ///< Right subtree + CDS_ATOMIC::atomic m_pUpdate ; ///< Update descriptor + //@cond + uintptr_t m_nEmptyUpdate; ///< ABA prevention for m_pUpdate, from 0..2^16 step 4 + //@endcond + + /// Default ctor + internal_node() + : base_class( true ) + , m_pLeft( null_ptr() ) + , m_pRight( null_ptr() ) + , m_pUpdate( update_ptr() ) + , m_nEmptyUpdate(0) + {} + + //@cond + update_ptr null_update_desc() + { + return update_ptr( reinterpret_cast( (++m_nEmptyUpdate << 2) & 0xFFFF ) ); + } + //@endcond + }; + + /// Types of EllenBinTree node + /** + This struct declares different \p %EllenBinTree node types. + It can be useful for simplifying \p %EllenBinTree node declaration in your application. + */ + template + struct node_types + { + typedef node leaf_node_type ; ///< Leaf node type + typedef internal_node internal_node_type ; ///< Internal node type + typedef update_desc update_desc_type ; ///< Update descriptor type + }; + + //@cond + struct undefined_gc; + struct default_hook { + typedef undefined_gc gc; + typedef opt::none tag; + }; + //@endcond + + //@cond + template < typename HookType, CDS_DECL_OPTIONS2> + struct hook + { + typedef typename opt::make_options< default_hook, CDS_OPTIONS2>::type options; + typedef typename options::gc gc; + typedef typename options::tag tag; + typedef node node_type; + typedef HookType hook_type; + }; + //@endcond + + /// Base hook + /** + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - a tag + */ + template < CDS_DECL_OPTIONS2 > + struct base_hook: public hook< opt::base_hook_tag, CDS_OPTIONS2 > + {}; + + /// Member hook + /** + \p MemberOffset defines offset in bytes of \ref node member into your structure. 
+ Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - a tag + */ + template < size_t MemberOffset, CDS_DECL_OPTIONS2 > + struct member_hook: public hook< opt::member_hook_tag, CDS_OPTIONS2 > + { + //@cond + static const size_t c_nMemberOffset = MemberOffset; + //@endcond + }; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - a tag + */ + template + struct traits_hook: public hook< opt::traits_hook_tag, CDS_OPTIONS2 > + { + //@cond + typedef NodeTraits node_traits; + //@endcond + }; + + /// Key extracting functor option setter + template + struct key_extractor { + //@cond + template struct pack: public Base + { + typedef Type key_extractor; + }; + //@endcond + }; + + /// Update descriptor allocator option setter + template + struct update_desc_allocator { + //@cond + template struct pack: public Base + { + typedef Type update_desc_allocator; + }; + //@endcond + }; + + /// EllenBinTree internal statistics + template + struct stat { + typedef Counter event_counter ; ///< Event counter type + + event_counter m_nInternalNodeCreated ; ///< Total count of created internal node + event_counter m_nInternalNodeDeleted ; ///< Total count of deleted internal node + event_counter m_nUpdateDescCreated ; ///< Total count of created update descriptors + event_counter m_nUpdateDescDeleted ; ///< Total count of deleted update descriptors + + event_counter m_nInsertSuccess ; ///< Count of success insertion + event_counter m_nInsertFailed ; ///< Count of failed insertion + event_counter m_nInsertRetries ; ///< Count of unsuccessful retries of insertion + event_counter m_nEnsureExist ; ///< Count of \p ensure call for existed node + event_counter m_nEnsureNew ; ///< Count of \p ensure call for new node + event_counter m_nEnsureRetries ; ///< Count of unsuccessful retries of ensuring + event_counter m_nEraseSuccess ; ///< Count of successful call of \p erase and \p unlink + event_counter m_nEraseFailed ; ///< Count of failed call of \p erase and \p unlink + event_counter m_nEraseRetries ; ///< Count of unsuccessful retries inside erasing/unlinking + event_counter m_nFindSuccess ; ///< Count of successful \p find call + event_counter m_nFindFailed ; ///< Count of failed \p find call + event_counter m_nExtractMinSuccess ; ///< Count of successful call of \p extract_min + event_counter m_nExtractMinFailed ; ///< Count of failed call of \p extract_min + event_counter m_nExtractMinRetries ; ///< Count of unsuccessful retries inside \p extract_min + event_counter m_nExtractMaxSuccess ; ///< Count of successful call of \p extract_max + event_counter m_nExtractMaxFailed ; ///< Count of failed call of \p extract_max + event_counter m_nExtractMaxRetries ; ///< Count of unsuccessful retries inside \p extract_max + event_counter m_nSearchRetry ; ///< How many times the deleting node was encountered while searching + + event_counter m_nHelpInsert ; ///< The number of insert help from the other thread + event_counter m_nHelpDelete ; ///< The number of delete help from the other thread + event_counter m_nHelpMark ; ///< The number of delete help (mark phase) from the other thread + event_counter m_nHelpGuardSuccess ; ///< The number of successful guarding of update descriptor data + event_counter m_nHelpGuardFailed ; ///< The number of failed guarding of update descriptor data + + //@cond + void 
onInternalNodeCreated() { ++m_nInternalNodeCreated ; } + void onInternalNodeDeleted() { ++m_nInternalNodeDeleted ; } + void onUpdateDescCreated() { ++m_nUpdateDescCreated ; } + void onUpdateDescDeleted() { ++m_nUpdateDescDeleted ; } + void onInsertSuccess() { ++m_nInsertSuccess ; } + void onInsertFailed() { ++m_nInsertFailed ; } + void onInsertRetry() { ++m_nInsertRetries ; } + void onEnsureExist() { ++m_nEnsureExist ; } + void onEnsureNew() { ++m_nEnsureNew ; } + void onEnsureRetry() { ++m_nEnsureRetries ; } + void onEraseSuccess() { ++m_nEraseSuccess ; } + void onEraseFailed() { ++m_nEraseFailed ; } + void onEraseRetry() { ++m_nEraseRetries ; } + void onExtractMinSuccess() { ++m_nExtractMinSuccess ; } + void onExtractMinFailed() { ++m_nExtractMinFailed ; } + void onExtractMinRetry() { ++m_nExtractMinRetries ; } + void onExtractMaxSuccess() { ++m_nExtractMaxSuccess ; } + void onExtractMaxFailed() { ++m_nExtractMaxFailed ; } + void onExtractMaxRetry() { ++m_nExtractMaxRetries ; } + void onFindSuccess() { ++m_nFindSuccess ; } + void onFindFailed() { ++m_nFindFailed ; } + void onSearchRetry() { ++m_nSearchRetry ; } + void onHelpInsert() { ++m_nHelpInsert ; } + void onHelpDelete() { ++m_nHelpDelete ; } + void onHelpMark() { ++m_nHelpMark ; } + void onHelpGuardSuccess() { ++m_nHelpGuardSuccess ; } + void onHelpGuardFailed() { ++m_nHelpGuardFailed ; } + //@endcond + }; + + /// EllenBinTree empty statistics + struct empty_stat { + //@cond + void onInternalNodeCreated() {} + void onInternalNodeDeleted() {} + void onUpdateDescCreated() {} + void onUpdateDescDeleted() {} + void onInsertSuccess() {} + void onInsertFailed() {} + void onInsertRetry() {} + void onEnsureExist() {} + void onEnsureNew() {} + void onEnsureRetry() {} + void onEraseSuccess() {} + void onEraseFailed() {} + void onEraseRetry() {} + void onExtractMinSuccess() {} + void onExtractMinFailed() {} + void onExtractMinRetry() {} + void onExtractMaxSuccess() {} + void onExtractMaxFailed() {} + void onExtractMaxRetry() {} + void onFindSuccess() {} + void onFindFailed() {} + void onSearchRetry() {} + void onHelpInsert() {} + void onHelpDelete() {} + void onHelpMark() {} + void onHelpGuardSuccess() {} + void onHelpGuardFailed() {} + //@endcond + }; + + /// Type traits for EllenBinTree class + struct type_traits + { + /// Hook used + /** + Possible values are: ellen_bintree::base_hook, ellen_bintree::member_hook, ellen_bintree::traits_hook. + */ + typedef base_hook<> hook; + + /// Key extracting functor + /** + You should explicit define a valid functor. + The functor has the following prototype: + \code + struct key_extractor { + void operator ()( Key& dest, T const& src ); + }; + \endcode + It should initialize \p dest key from \p src data. + The functor is used to initialize internal nodes. + */ + typedef opt::none key_extractor; + + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + + See cds::opt::compare option description for functor interface. + + You should provide \p compare or \p less functor. + See \ref cds_intrusive_EllenBinTree_rcu_less "predicate requirements". + */ + typedef opt::none compare; + + /// Specifies binary predicate used for key compare. + /** + See cds::opt::less option description for predicate interface. + + You should provide \p compare or \p less functor. + See \ref cds_intrusive_EllenBinTree_rcu_less "predicate requirements". + */ + typedef opt::none less; + + /// Disposer + /** + The functor used for dispose removed items. 
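+ A typical user-provided disposer simply reclaims the removed leaf; a minimal sketch,
+ assuming the stored items of type \p T (the tree's value type) were allocated with \p new, might be:
+ \code
+ struct my_disposer {
+     void operator()( T * p )
+     {
+         delete p ;  // invoked asynchronously by the garbage collector, not at unlink time
+     }
+ };
+ \endcode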
Default is opt::v::empty_disposer. + */ + typedef opt::v::empty_disposer disposer; + + /// Item counter + /** + The type for item counting feature (see cds::opt::item_counter). + Default is no item counter (\ref atomicity::empty_item_counter) + */ + typedef atomicity::empty_item_counter item_counter; + + /// C++ memory ordering model + /** + List of available memory ordering see opt::memory_model + */ + typedef opt::v::relaxed_ordering memory_model; + + /// Allocator for update descriptors + /** + The allocator type is used for \ref update_desc. + + Update descriptor is helping data structure with short lifetime and it is good candidate + for pooling. The number of simultaneously existing descriptors is bounded number + limited the number of threads working with the tree. + Therefore, a bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue + is good choice for the free-list of update descriptors, + see cds::memory::vyukov_queue_pool free-list implementation. + + Also notice that size of update descriptor is constant and not dependent on the type of data + stored in the tree so single free-list object can be used for several \p EllenBinTree object. + */ + typedef CDS_DEFAULT_ALLOCATOR update_desc_allocator; + + /// Allocator for internal nodes + /** + The allocator type is used for \ref internal_node. + */ + typedef CDS_DEFAULT_ALLOCATOR node_allocator; + + /// Internal statistics + /** + Possible types: \p ellen_bintree::empty_stat (the default), \p ellen_bintree::stat or any + other with interface like \p %stat. + */ + typedef empty_stat stat; + + /// RCU deadlock checking policy (only for \ref cds_intrusive_EllenBinTree_rcu "RCU-based EllenBinTree") + /** + List of available options see opt::rcu_check_deadlock + */ + typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock; + }; + + /// Metafunction converting option list to EllenBinTree traits + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + \p Options list see \ref EllenBinTree. + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS12 >::type + ,CDS_OPTIONS12 + >::type type; +# endif + }; + + //@cond + namespace details { + + template + struct compare + { + typedef Compare key_compare; + typedef Key key_type; + typedef T value_type; + typedef NodeTraits node_traits; + + template + int operator()( Q1 const& v1, Q2 const& v2) const + { + return key_compare()( v1, v2 ); + } + + template + int operator()( internal_node const& n1, internal_node const& n2 ) const + { + if ( n1.infinite_key() ) + return n2.infinite_key() ? 
n1.infinite_key() - n2.infinite_key() : 1; + else if ( n2.infinite_key() ) + return -1; + return operator()( n1.m_Key, n2.m_Key ); + } + + template + int operator()( internal_node const& n, Q const& v ) const + { + if ( n.infinite_key() ) + return 1; + return operator()( n.m_Key, v ); + } + + template + int operator()( Q const& v, internal_node const& n ) const + { + if ( n.infinite_key() ) + return -1; + return operator()( v, n.m_Key ); + } + + template + int operator()( node const& n1, node const& n2 ) const + { + if ( n1.infinite_key() != n2.infinite_key() ) + return n1.infinite_key() - n2.infinite_key(); + return operator()( *node_traits::to_value_ptr( n1 ), *node_traits::to_value_ptr( n2 )); + } + + template + int operator()( node const& n, Q const& v ) const + { + if ( n.infinite_key() ) + return 1; + return operator()( *node_traits::to_value_ptr( n ), v ); + } + + template + int operator()( Q const& v, node const& n ) const + { + if ( n.infinite_key() ) + return -1; + return operator()( v, *node_traits::to_value_ptr( n ) ); + } + + template + int operator()( base_node const& n1, base_node const& n2 ) const + { + if ( n1.infinite_key() != n2.infinite_key() ) + return n1.infinite_key() - n2.infinite_key(); + if ( n1.is_leaf() ) { + if ( n2.is_leaf() ) + return operator()( node_traits::to_leaf_node( n1 ), node_traits::to_leaf_node( n2 )); + else + return operator()( node_traits::to_leaf_node( n1 ), node_traits::to_internal_node( n2 )); + } + + if ( n2.is_leaf() ) + return operator()( node_traits::to_internal_node( n1 ), node_traits::to_leaf_node( n2 )); + else + return operator()( node_traits::to_internal_node( n1 ), node_traits::to_internal_node( n2 )); + } + + template + int operator()( base_node const& n, Q const& v ) const + { + if ( n.infinite_key()) + return 1; + if ( n.is_leaf() ) + return operator()( node_traits::to_leaf_node( n ), v ); + return operator()( node_traits::to_internal_node( n ), v ); + } + + template + int operator()( Q const& v, base_node const& n ) const + { + return -operator()( n, v ); + } + + template + int operator()( base_node const& i, internal_node const& n ) const + { + if ( i.is_leaf() ) + return operator()( static_cast(i), n ); + return operator()( static_cast const&>(i), n ); + } + + template + int operator()( internal_node const& n, base_node const& i ) const + { + return -operator()( i, n ); + } + + template + int operator()( node const& n, internal_node > const& i ) const + { + if ( !n.infinite_key() ) { + if ( i.infinite_key() ) + return -1; + return operator()( n, i.m_Key ); + } + + if ( !i.infinite_key()) + return 1; + return int( n.infinite_key()) - int( i.infinite_key()); + } + + template + int operator()( internal_node > const& i, node const& n ) const + { + return -operator()( n, i ); + } + + }; + + } // namespace details + //@endcond + + } // namespace ellen_bintree + + // Forwards + template < class GC, typename Key, typename T, class Traits = ellen_bintree::type_traits > + class EllenBinTree; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_DETAILS_ELLEN_BINTREE_BASE_H diff --git a/cds/intrusive/ellen_bintree_hp.h b/cds/intrusive/ellen_bintree_hp.h new file mode 100644 index 00000000..02e52ccd --- /dev/null +++ b/cds/intrusive/ellen_bintree_hp.h @@ -0,0 +1,9 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_ELLEN_BINTREE_HP_H +#define __CDS_INTRUSIVE_ELLEN_BINTREE_HP_H + +#include +#include + +#endif // #ifndef __CDS_INTRUSIVE_ELLEN_BINTREE_HP_H diff --git a/cds/intrusive/ellen_bintree_impl.h 
b/cds/intrusive/ellen_bintree_impl.h new file mode 100644 index 00000000..30f36175 --- /dev/null +++ b/cds/intrusive/ellen_bintree_impl.h @@ -0,0 +1,1639 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_ELLEN_BINTREE_IMPL_H +#define __CDS_INTRUSIVE_ELLEN_BINTREE_IMPL_H + +#include +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// Ellen's et al binary search tree + /** @ingroup cds_intrusive_map + @ingroup cds_intrusive_tree + @anchor cds_intrusive_EllenBinTree + + Source: + - [2010] F.Ellen, P.Fatourou, E.Ruppert, F.van Breugel "Non-blocking Binary Search Tree" + + %EllenBinTree is an unbalanced leaf-oriented binary search tree that implements the set + abstract data type. Nodes maintains child pointers but not parent pointers. + Every internal node has exactly two children, and all data of type \p T currently in + the tree are stored in the leaves. Internal nodes of the tree are used to direct \p find + operation along the path to the correct leaf. The keys (of \p Key type) stored in internal nodes + may or may not be in the set. \p Key type is a subset of \p T type. + There should be exactly defined a key extracting functor for converting object of type \p T to + object of type \p Key. + + Due to \p extract_min and \p extract_max member functions the \p %EllenBinTree can act as + a priority queue. In this case you should provide unique compound key, for example, + the priority value plus some uniformly distributed random value. + + @note In the current implementation we do not use helping technique described in the original paper. + In Hazard Pointer schema helping is too complicated and does not give any observable benefits. + Instead of helping, when a thread encounters a concurrent operation it just spins waiting for + the operation done. Such solution allows greatly simplify the implementation of tree. + + @warning Recall the tree is unbalanced. The complexity of operations is O(log N) + for uniformly distributed random keys, but in worst case the complexity is O(N). + + @note Do not include header file explicitly. + There are header file for each GC type: + - - for Hazard Pointer GC cds::gc::HP + - - for Pass-the-Buck GC cds::gc::PTB + - - for RCU GC + (see \ref cds_intrusive_EllenBinTree_rcu "RCU-based EllenBinTree") + + Template arguments : + - \p GC - garbage collector used, possible types are cds::gc::HP, cds::gc::PTB. + Note that cds::gc::HRC is not supported. + - \p Key - key type, a subset of \p T + - \p T - type to be stored in tree's leaf nodes. The type must be based on ellen_bintree::node + (for ellen_bintree::base_hook) or it must have a member of type ellen_bintree::node + (for ellen_bintree::member_hook). + - \p Traits - type traits. See ellen_bintree::type_traits for explanation. + + It is possible to declare option-based tree with cds::intrusive::ellen_bintree::make_traits metafunction + instead of \p Traits template argument. + Template argument list \p Options of cds::intrusive::ellen_bintree::make_traits metafunction are: + - opt::hook - hook used. Possible values are: ellen_bintree::base_hook, ellen_bintree::member_hook, ellen_bintree::traits_hook. + If the option is not specified, ellen_bintree::base_hook<> is used. + - ellen_bintree::key_extractor - key extracting functor, mandatory option. The functor has the following prototype: + \code + struct key_extractor { + void operator ()( Key& dest, T const& src ); + }; + \endcode + It should initialize \p dest key from \p src data. 
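+ For instance, a hypothetical extractor for a value type \p MyData keyed by an \p int field
+ (names are illustrative only) could be:
+ \code
+ struct my_key_extractor {
+     void operator()( int& dest, MyData const& src )
+     {
+         dest = src.m_nKey ;   // copy the key part of the value into the internal node's key
+     }
+ };
+ \endcode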
The functor is used to initialize internal nodes. + - opt::compare - key compare functor. No default functor is provided. + If the option is not specified, \p %opt::less is used. + - opt::less - specifies binary predicate used for key compare. At least \p %opt::compare or \p %opt::less should be defined. + - opt::disposer - the functor used for dispose removed nodes. Default is opt::v::empty_disposer. Due the nature + of GC schema the disposer may be called asynchronously. The disposer is used only for leaf nodes. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that means no item counting. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - ellen_bintree::update_desc_allocator - an allocator of \ref ellen_bintree::update_desc "update descriptors", + default is CDS_DEFAULT_ALLOCATOR. + Note that update descriptor is helping data structure with short lifetime and it is good candidate for pooling. + The number of simultaneously existing descriptors is bounded and depends on the number of threads + working with the tree and GC internals. + A bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue is good candidate + for the free-list of update descriptors, see cds::memory::vyukov_queue_pool free-list implementation. + Also notice that size of update descriptor is constant and not dependent on the type of data + stored in the tree so single free-list object can be used for all \p %EllenBinTree objects. + - opt::node_allocator - the allocator used for internal nodes. Default is \ref CDS_DEFAULT_ALLOCATOR. + - opt::stat - internal statistics. Available types: ellen_bintree::stat, ellen_bintree::empty_stat (the default) + + @anchor cds_intrusive_EllenBinTree_less + Predicate requirements + + opt::less, opt::compare and other predicates using with member fuctions should accept at least parameters + of type \p T and \p Key in any combination. + For example, for \p Foo struct with \p std::string key field the appropiate \p less functor is: + \code + struct Foo: public cds::intrusive::ellen_bintree::node< ... > + { + std::string m_strKey; + ... 
+ }; + + struct less { + bool operator()( Foo const& v1, Foo const& v2 ) const + { return v1.m_strKey < v2.m_strKey ; } + + bool operator()( Foo const& v, std::string const& s ) const + { return v.m_strKey < s ; } + + bool operator()( std::string const& s, Foo const& v ) const + { return s < v.m_strKey ; } + + // Support comparing std::string and char const * + bool operator()( std::string const& s, char const * p ) const + { return s.compare(p) < 0 ; } + + bool operator()( Foo const& v, char const * p ) const + { return v.m_strKey.compare(p) < 0 ; } + + bool operator()( char const * p, std::string const& s ) const + { return s.compare(p) > 0; } + + bool operator()( char const * p, Foo const& v ) const + { return v.m_strKey.compare(p) > 0; } + }; + \endcode + */ + template < class GC, + typename Key, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = ellen_bintree::type_traits +#else + class Traits +#endif + > + class EllenBinTree + { + public: + typedef GC gc ; ///< Garbage collector used + typedef Key key_type ; ///< type of a key stored in internal nodes; key is a part of \p value_type + typedef T value_type ; ///< type of value stored in the binary tree + typedef Traits options ; ///< Traits template parameter + + typedef typename options::hook hook ; ///< hook type + typedef typename hook::node_type node_type ; ///< node type + typedef typename options::disposer disposer ; ///< leaf node disposer + + typedef cds::gc::guarded_ptr< gc, value_type > guarded_ptr; ///< Guarded pointer + + protected: + //@cond + typedef ellen_bintree::base_node< gc > tree_node ; ///< Base type of tree node + typedef node_type leaf_node ; ///< Leaf node type + typedef ellen_bintree::node_types< gc, key_type, typename leaf_node::tag > node_factory; + typedef typename node_factory::internal_node_type internal_node ; ///< Internal node type + typedef typename node_factory::update_desc_type update_desc ; ///< Update descriptor + typedef typename update_desc::update_ptr update_ptr ; ///< Marked pointer to update descriptor + //@endcond + + public: +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key compare functor based on opt::compare and opt::less option setter. + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< Node traits +# else + typedef typename opt::details::make_comparator< value_type, options >::type key_comparator; + struct node_traits: public get_node_traits< value_type, node_type, hook>::type + { + static internal_node const& to_internal_node( tree_node const& n ) + { + assert( n.is_internal() ); + return static_cast( n ); + } + + static leaf_node const& to_leaf_node( tree_node const& n ) + { + assert( n.is_leaf() ); + return static_cast( n ); + } + }; +# endif + + typedef typename options::item_counter item_counter; ///< Item counting policy used + typedef typename options::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename options::stat stat ; ///< internal statistics type + typedef typename options::key_extractor key_extractor ; ///< key extracting functor + + typedef typename options::node_allocator node_allocator ; ///< Internal node allocator + typedef typename options::update_desc_allocator update_desc_allocator ; ///< Update descriptor allocator + + protected: + //@cond + typedef ellen_bintree::details::compare< key_type, value_type, key_comparator, node_traits > node_compare; + + typedef cds::details::Allocator< internal_node, node_allocator > cxx_node_allocator; + typedef cds::details::Allocator< update_desc, update_desc_allocator > cxx_update_desc_allocator; + + struct search_result { + enum guard_index { + Guard_GrandParent, + Guard_Parent, + Guard_Leaf, + Guard_updGrandParent, + Guard_updParent, + + // helping + Guard_helpLeaf, + + // end of guard indices + guard_count + }; + + typedef typename gc::template GuardArray< guard_count > guard_array; + guard_array guards; + + internal_node * pGrandParent; + internal_node * pParent; + leaf_node * pLeaf; + update_ptr updParent; + update_ptr updGrandParent; + bool bRightLeaf ; // true if pLeaf is right child of pParent, false otherwise + bool bRightParent ; // true if pParent is right child of pGrandParent, false otherwise + + search_result() + :pGrandParent( null_ptr() ) + ,pParent( null_ptr() ) + ,pLeaf( null_ptr() ) + ,bRightLeaf( false ) + ,bRightParent( false ) + {} + + void clean_help_guards() + { + guards.clear( Guard_helpLeaf ); + } + }; + //@endcond + + protected: + //@cond + internal_node m_Root ; ///< Tree root node (key= Infinite2) + leaf_node m_LeafInf1 ; ///< Infinite leaf 1 (key= Infinite1) + leaf_node m_LeafInf2 ; ///< Infinite leaf 2 (key= Infinite2) + //@endcond + + item_counter m_ItemCounter ; ///< item counter + mutable stat m_Stat ; ///< internal statistics + + protected: + //@cond + static void free_leaf_node( value_type * p ) + { + disposer()( p ); + } + + internal_node * alloc_internal_node() const + { + m_Stat.onInternalNodeCreated(); + internal_node * pNode = cxx_node_allocator().New(); + return pNode; + } + + static void free_internal_node( internal_node * pNode ) + { + cxx_node_allocator().Delete( pNode ); + } + + struct internal_node_deleter { + void operator()( internal_node * p) const + { + free_internal_node( p ); + } + }; + + typedef std::unique_ptr< internal_node, internal_node_deleter> unique_internal_node_ptr; + + update_desc * alloc_update_desc() const + { + m_Stat.onUpdateDescCreated(); + return cxx_update_desc_allocator().New(); + } + + static void free_update_desc( update_desc * pDesc ) + { + cxx_update_desc_allocator().Delete( pDesc ); + } + + void retire_node( tree_node * pNode ) const + { + if ( pNode->is_leaf() ) { + assert( static_cast( pNode ) != &m_LeafInf1 ); + assert( static_cast( pNode ) != &m_LeafInf2 ); + + gc::template retire( node_traits::to_value_ptr( static_cast( pNode )), free_leaf_node ); + } + else { + assert( static_cast( pNode ) != &m_Root ); + m_Stat.onInternalNodeDeleted(); + + gc::template retire( static_cast( pNode ), free_internal_node ); + } + } + + void retire_update_desc( update_desc * p ) const + { + m_Stat.onUpdateDescDeleted(); + gc::template retire( p, free_update_desc ); + } + + void make_empty_tree() + { + m_Root.infinite_key( 2 ); + m_LeafInf1.infinite_key( 1 ); + m_LeafInf2.infinite_key( 2 ); + m_Root.m_pLeft.store( &m_LeafInf1, memory_model::memory_order_relaxed ); + m_Root.m_pRight.store( &m_LeafInf2, 
memory_model::memory_order_release ); + } + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct trivial_equal_functor { + template + bool operator()( Q const& , leaf_node const& ) const + { + return true; + } + }; + + struct empty_insert_functor { + void operator()( value_type& ) + {} + }; + + struct assign_guard_functor { + typename gc::Guard& m_guard; + assign_guard_functor( typename gc::Guard& guard ) + : m_guard(guard) + {} + + template + void operator()( value_type& val, Q& ) + { + m_guard.assign( &val ); + } + + void operator()( value_type& val ) + { + m_guard.assign( &val ); + } + }; + +# endif + +# if !defined(CDS_CXX11_LAMBDA_SUPPORT) || (CDS_COMPILER == CDS_COMPILER_MSVC && CDS_COMPILER_VERSION == CDS_COMPILER_MSVC10) + struct unlink_equal_functor { + bool operator()( value_type const& v, leaf_node const& n ) const + { + return &v == node_traits::to_value_ptr( n ); + } + }; + struct empty_erase_functor { + void operator()( value_type const& ) + {} + }; +# endif + //@endcond + + public: + /// Default constructor + EllenBinTree() + { + static_assert( (!std::is_same< key_extractor, opt::none >::value), "The key extractor option must be specified" ); + make_empty_tree(); + } + + /// Clears the tree + ~EllenBinTree() + { + unsafe_clear(); + } + + /// Inserts new node + /** + The function inserts \p val in the tree if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the tree, \p false otherwise. + */ + bool insert( value_type& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return insert( val, []( value_type& ) {} ); +# else + return insert( val, empty_insert_functor() ); +# endif + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the tree + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this tree's item by concurrent threads. + The user-defined functor is called only if the inserting is success and may be passed by reference + using boost::ref + */ + template + bool insert( value_type& val, Func f ) + { + typename gc::Guard guardInsert; + guardInsert.assign( &val ); + + unique_internal_node_ptr pNewInternal; + + search_result res; + for ( ;; ) { + if ( search( res, val, node_compare() )) { + if ( pNewInternal.get() ) + m_Stat.onInternalNodeDeleted() ; // unique_internal_node_ptr deletes internal node + m_Stat.onInsertFailed(); + return false; + } + + if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { + + if ( !pNewInternal.get() ) + pNewInternal.reset( alloc_internal_node() ); + + if ( try_insert( val, pNewInternal.get(), res )) { + cds::unref(f)( val ); + pNewInternal.release(); // internal node is linked into the tree and should not be deleted + break; + } + } + + m_Stat.onInsertRetry(); + } + + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + return true; + } + + /// Ensures that the \p val exists in the tree + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the tree, then \p val is inserted into the tree. + Otherwise, the functor \p func is called with item found. 
+ The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the tree + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. + + The functor can change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You can pass \p func argument by value or by reference using boost::ref or cds::ref. + + Returns std::pair where \p first is \p true if operation is successfull, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the tree. + */ + template + std::pair ensure( value_type& val, Func func ) + { + typename gc::Guard guardInsert; + guardInsert.assign( &val ); + + unique_internal_node_ptr pNewInternal; + + search_result res; + for ( ;; ) { + if ( search( res, val, node_compare() )) { + cds::unref(func)( false, *node_traits::to_value_ptr( res.pLeaf ), val ); + if ( pNewInternal.get() ) + m_Stat.onInternalNodeDeleted() ; // unique_internal_node_ptr deletes internal node + m_Stat.onEnsureExist(); + return std::make_pair( true, false ); + } + + if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { + + if ( !pNewInternal.get() ) + pNewInternal.reset( alloc_internal_node() ); + + if ( try_insert( val, pNewInternal.get(), res )) { + cds::unref(func)( true, val, val ); + pNewInternal.release() ; // internal node has been linked into the tree and should not be deleted + break; + } + } + m_Stat.onEnsureRetry(); + } + + ++m_ItemCounter; + m_Stat.onEnsureNew(); + return std::make_pair( true, true ); + } + + /// Unlinks the item \p val from the tree + /** + The function searches the item \p val in the tree and unlink it from the tree + if it is found and is equal to \p val. + + Difference between \ref erase and \p unlink functions: \p erase finds a key + and deletes the item found. \p unlink finds an item by key and deletes it + only if \p val is an item of the tree, i.e. the pointer to item found + is equal to &val . + + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { +# if defined(CDS_CXX11_LAMBDA_SUPPORT) && !(CDS_COMPILER == CDS_COMPILER_MSVC && CDS_COMPILER_VERSION == CDS_COMPILER_MSVC10) + // vc10 generates an error for the lambda - it sees cds::intrusive::node_traits but not class-defined node_traits + return erase_( val, node_compare(), + []( value_type const& v, leaf_node const& n ) -> bool { return &v == node_traits::to_value_ptr( n ); }, + [](value_type const&) {} ); +# else + return erase_( val, node_compare(), unlink_equal_functor(), empty_erase_functor() ); +# endif + } + + /// Deletes the item from the tree + /** \anchor cds_intrusive_EllenBinTree_erase + The function searches an item with key equal to \p val in the tree, + unlinks it from the tree, and returns \p true. + If the item with key equal to \p val is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. 
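+
+ A minimal usage sketch (hypothetical; assumes the tree comparator accepts an \p int key
+ and \p theTree is an \p %EllenBinTree instance):
+ \code
+ if ( theTree.erase( 42 )) {
+     // the leaf with key 42 has been unlinked from the tree;
+     // the disposer reclaims it asynchronously via the garbage collector
+ }
+ \endcode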
+ */ + template + bool erase( const Q& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_( val, node_compare(), + []( Q const&, leaf_node const& ) -> bool { return true; }, + [](value_type const&) {} ); +# else + return erase_( val, node_compare(), trivial_equal_functor(), empty_erase_functor() ); +# endif + } + + /// Delete the item from the tree with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_erase "erase(Q const&)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. + */ + template + bool erase_with( const Q& val, Less pred ) + { + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_( val, compare_functor(), + []( Q const&, leaf_node const& ) -> bool { return true; }, + [](value_type const&) {} ); +# else + return erase_( val, compare_functor(), trivial_equal_functor(), empty_erase_functor() ); +# endif + } + + /// Deletes the item from the tree + /** \anchor cds_intrusive_EllenBinTree_erase_func + The function searches an item with key equal to \p val in the tree, + call \p f functor with item found, unlinks it from the tree, and returns \p true. + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. + + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + The functor can be passed by reference with boost:ref + + If the item with key equal to \p val is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool erase( Q const& val, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_( val, node_compare(), + []( Q const&, leaf_node const& ) -> bool { return true; }, + f ); +# else + return erase_( val, node_compare(), trivial_equal_functor(), f ); +# endif + } + + /// Delete the item from the tree with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_erase_func "erase(Q const&, Func)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. + */ + template + bool erase_with( Q const& val, Less pred, Func f ) + { + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_( val, compare_functor(), + []( Q const&, leaf_node const& ) -> bool { return true; }, + f ); +# else + return erase_( val, compare_functor(), trivial_equal_functor(), f ); +# endif + } + + /// Extracts an item with minimal key from the tree + /** + The function searches an item with minimal key, unlinks it, and returns pointer to an item found in \p dest parameter. + If the tree is empty the function returns \p false. + + @note Due the concurrent nature of the tree, the function extracts nearly minimum key. 
+ It means that the function gets leftmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of tree traversing. + + The guarded pointer \p dest prevents disposer invocation for returned item, + see cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + bool extract_min( guarded_ptr& dest ) + { + return extract_min_( dest.guard()); + } + + /// Extracts an item with maximal key from the tree + /** + The function searches an item with maximal key, unlinks it, and returns pointer to an item found in \p dest parameter. + If the tree is empty the function returns \p false. + + @note Due the concurrent nature of the tree, the function extracts nearly maximal key. + It means that the function gets rightmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key great than rightmost item's key. + So, the function returns the item with maximal key at the moment of tree traversing. + + The guarded pointer \p dest prevents disposer invocation for returned item, + see cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + bool extract_max( guarded_ptr& dest ) + { + return extract_max_( dest.guard() ); + } + + /// Extracts an item from the tree + /** \anchor cds_intrusive_EllenBinTree_extract + The function searches an item with key equal to \p key in the tree, + unlinks it, and returns pointer to an item found in \p dest parameter. + If the item is not found the function returns \p false. + + The guarded pointer \p dest prevents disposer invocation for returned item, + see cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + template + bool extract( guarded_ptr& dest, Q const& key ) + { + return extract_( dest.guard(), key ); + } + + /// Extracts an item from the tree using \p pred for searching + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_extract "extract(guarded_ptr& dest, Q const&)" + but \p pred is used for key compare. + \p Less has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. + */ + template + bool extract_with( guarded_ptr& dest, Q const& key, Less pred ) + { + return extract_with_( dest.guard(), key, pred ); + } + + /// Finds the key \p val + /** @anchor cds_intrusive_EllenBinTree_find_val + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool find( Q const& val ) const + { + search_result res; + if ( search( res, val, node_compare() )) { + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + + /// Finds the key \p val with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_find_val "find(Q const&)" + but \p pred is used for key compare. 
+ \p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. + \p pred should accept arguments of type \p Q, \p key_type, \p value_type in any combination. + */ + template + bool find_with( Q const& val, Less pred ) const + { + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + + search_result res; + if ( search( res, val, compare_functor() )) { + m_Stat.onFindSuccess(); + return true; + } + m_Stat.onFindFailed(); + return false; + } + + /// Finds the key \p val + /** @anchor cds_intrusive_EllenBinTree_find_func + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the tree \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) const + { + return find_( val, f ); + } + + /// Finds the key \p val with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_find_func "find(Q&, Func)" + but \p pred is used for key comparison. + \p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. + */ + template + bool find_with( Q& val, Less pred, Func f ) const + { + return find_with_( val, pred, f ); + } + + /// Finds the key \p val + /** @anchor cds_intrusive_EllenBinTree_find_cfunc + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the tree \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The function returns \p true if \p val is found, \p false otherwise. 
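+
+ A hypothetical usage sketch (the functor name \p payload_updater and the field \p m_nPayload
+ are illustrative, not part of the library):
+ \code
+ struct payload_updater {
+     void operator()( value_type& item, int const& key )
+     {
+         // item is protected from disposal while the functor runs,
+         // but concurrent access to item is not serialized by the tree
+         item.m_nPayload = key ;
+     }
+ };
+ //...
+ bool bFound = theTree.find( 42, payload_updater() );
+ \endcode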
+ */ + template + bool find( Q const& val, Func f ) const + { + return find_( val, f ); + } + + /// Finds the key \p val with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_find_cfunc "find(Q const&, Func)" + but \p pred is used for key compare. + \p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. + */ + template + bool find_with( Q const& val, Less pred, Func f ) const + { + return find_with_( val, pred, f ); + } + + /// Finds \p key and returns the item found + /** @anchor cds_intrusive_EllenBinTree_get + The function searches the item with key equal to \p key and returns the item found in \p dest parameter. + The function returns \p true if \p key is found, \p false otherwise. + + The guarded pointer \p dest prevents disposer invocation for returned item, + see cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + template + bool get( guarded_ptr& dest, Q const& key ) const + { + return get_( dest.guard(), key ); + } + + /// Finds \p key with predicate \p pred and returns the item found + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_get "get(guarded_ptr&, Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. + */ + template + bool get_with( guarded_ptr& dest, Q const& key, Less pred ) const + { + return get_with_( dest.guard(), key, pred ); + } + + /// Checks if the tree is empty + bool empty() const + { + return m_Root.m_pLeft.load( memory_model::memory_order_relaxed )->is_leaf(); + } + + /// Clears the tree (thread safe, non-atomic) + /** + The function unlink all items from the tree. + The function is thread safe but not atomic: in multi-threaded environment with parallel insertions + this sequence + \code + tree.clear(); + assert( tree.empty() ); + \endcode + the assertion could be raised. + + For each leaf the \ref disposer will be called after unlinking. + */ + void clear() + { + guarded_ptr gp; + while ( extract_min(gp)); + } + + /// Clears the tree (not thread safe) + /** + This function is not thread safe and may be called only when no other thread deals with the tree. + The function is used in the tree destructor. + */ + void unsafe_clear() + { + while ( true ) { + internal_node * pParent = null_ptr< internal_node *>(); + internal_node * pGrandParent = null_ptr(); + tree_node * pLeaf = const_cast( &m_Root ); + + // Get leftmost leaf + while ( pLeaf->is_internal() ) { + pGrandParent = pParent; + pParent = static_cast( pLeaf ); + pLeaf = pParent->m_pLeft.load( memory_model::memory_order_relaxed ); + } + + if ( pLeaf->infinite_key()) { + // The tree is empty + return; + } + + // Remove leftmost leaf and its parent node + assert( pGrandParent ); + assert( pParent ); + assert( pLeaf->is_leaf() ); + + pGrandParent->m_pLeft.store( pParent->m_pRight.load( memory_model::memory_order_relaxed ), memory_model::memory_order_relaxed ); + free_leaf_node( node_traits::to_value_ptr( static_cast( pLeaf ) ) ); + free_internal_node( pParent ); + } + } + + /// Returns item count in the tree + /** + Only leaf nodes containing user data are counted. 
+ + The value returned depends on item counter type provided by \p Traits template parameter. + If it is atomicity::empty_item_counter this function always returns 0. + Therefore, the function is not suitable for checking the tree emptiness, use \ref empty + member function for this purpose. + */ + size_t size() const + { + return m_ItemCounter; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + /// Checks internal consistency (not atomic, not thread-safe) + /** + The debugging function to check internal consistency of the tree. + */ + bool check_consistency() const + { + return check_consistency( &m_Root ); + } + + protected: + //@cond + + bool check_consistency( internal_node const * pRoot ) const + { + tree_node * pLeft = pRoot->m_pLeft.load( CDS_ATOMIC::memory_order_relaxed ); + tree_node * pRight = pRoot->m_pRight.load( CDS_ATOMIC::memory_order_relaxed ); + assert( pLeft ); + assert( pRight ); + + if ( node_compare()( *pLeft, *pRoot ) < 0 + && node_compare()( *pRoot, *pRight ) <= 0 + && node_compare()( *pLeft, *pRight ) < 0 ) + { + bool bRet = true; + if ( pLeft->is_internal() ) + bRet = check_consistency( static_cast( pLeft ) ); + assert( bRet ); + + if ( bRet && pRight->is_internal() ) + bRet = bRet && check_consistency( static_cast( pRight )); + assert( bRet ); + + return bRet; + } + return false; + } + + tree_node * protect_child_node( search_result& res, internal_node * pParent, bool bRight, update_ptr updParent ) const + { + tree_node * p; + tree_node * pn = bRight ? pParent->m_pRight.load( memory_model::memory_order_relaxed ) : pParent->m_pLeft.load( memory_model::memory_order_relaxed ); + do { + p = pn; + res.guards.assign( search_result::Guard_Leaf, static_cast( p )); + res.guards.assign( search_result::Guard_helpLeaf, node_traits::to_value_ptr( static_cast( p ) )); + pn = bRight ? 
pParent->m_pRight.load( memory_model::memory_order_acquire ) : pParent->m_pLeft.load( memory_model::memory_order_acquire ); + } while ( p != pn ); + + // child node is guarded + // See whether pParent->m_pUpdate has not been changed + if ( pParent->m_pUpdate.load( memory_model::memory_order_acquire ) != updParent ) { + // update has been changed - returns nullptr as a flag to search retry + return null_ptr(); + } + + if ( p && p->is_leaf() ) + res.guards.copy( search_result::Guard_Leaf, search_result::Guard_helpLeaf ); + res.guards.clear( search_result::Guard_helpLeaf ); + return p; + } + + update_ptr search_protect_update( search_result& res, CDS_ATOMIC::atomic const& src ) const + { + update_ptr ret; + update_ptr upd( src.load( memory_model::memory_order_relaxed ) ); + do { + ret = upd; + res.guards.assign( search_result::Guard_updParent, upd ); + } while ( ret != (upd = src.load( memory_model::memory_order_acquire )) ); + return ret; + } + + template + bool search( search_result& res, KeyValue const& key, Compare cmp ) const + { + internal_node * pParent; + internal_node * pGrandParent = null_ptr(); + update_ptr updParent; + update_ptr updGrandParent; + bool bRightLeaf; + bool bRightParent = false; + + int nCmp = 0; + + retry: + pParent = null_ptr< internal_node *>(); + //pGrandParent = null_ptr(); + updParent = null_ptr(); + bRightLeaf = false; + tree_node * pLeaf = const_cast( &m_Root ); + while ( pLeaf->is_internal() ) { + res.guards.copy( search_result::Guard_GrandParent, search_result::Guard_Parent ); + pGrandParent = pParent; + res.guards.copy( search_result::Guard_Parent, search_result::Guard_Leaf ); + pParent = static_cast( pLeaf ); + bRightParent = bRightLeaf; + res.guards.copy( search_result::Guard_updGrandParent, search_result::Guard_updParent ); + updGrandParent = updParent; + + updParent = search_protect_update( res, pParent->m_pUpdate ); + + switch ( updParent.bits() ) { + case update_desc::DFlag: + case update_desc::Mark: + m_Stat.onSearchRetry(); + goto retry; + } + + nCmp = cmp( key, *pParent ); + bRightLeaf = nCmp >= 0; + + pLeaf = protect_child_node( res, pParent, bRightLeaf, updParent ); + if ( !pLeaf ) { + m_Stat.onSearchRetry(); + goto retry; + } + } + + assert( pLeaf->is_leaf() ); + nCmp = cmp( key, *static_cast(pLeaf) ); + + res.pGrandParent = pGrandParent; + res.pParent = pParent; + res.pLeaf = static_cast( pLeaf ); + res.updParent = updParent; + res.updGrandParent = updGrandParent; + res.bRightParent = bRightParent; + res.bRightLeaf = bRightLeaf; + + return nCmp == 0; + } + + bool search_min( search_result& res ) const + { + internal_node * pParent; + internal_node * pGrandParent; + update_ptr updParent; + update_ptr updGrandParent; + + retry: + pParent = null_ptr< internal_node *>(); + pGrandParent = null_ptr(); + updParent = null_ptr(); + tree_node * pLeaf = const_cast( &m_Root ); + while ( pLeaf->is_internal() ) { + res.guards.copy( search_result::Guard_GrandParent, search_result::Guard_Parent ); + pGrandParent = pParent; + res.guards.copy( search_result::Guard_Parent, search_result::Guard_Leaf ); + pParent = static_cast( pLeaf ); + res.guards.copy( search_result::Guard_updGrandParent, search_result::Guard_updParent ); + updGrandParent = updParent; + + updParent = search_protect_update( res, pParent->m_pUpdate ); + + switch ( updParent.bits() ) { + case update_desc::DFlag: + case update_desc::Mark: + m_Stat.onSearchRetry(); + goto retry; + } + + pLeaf = protect_child_node( res, pParent, false, updParent ); + if ( !pLeaf ) { + m_Stat.onSearchRetry(); + goto 
retry; + } + } + + if ( pLeaf->infinite_key()) + return false; + + res.pGrandParent = pGrandParent; + res.pParent = pParent; + assert( pLeaf->is_leaf() ); + res.pLeaf = static_cast( pLeaf ); + res.updParent = updParent; + res.updGrandParent = updGrandParent; + res.bRightParent = false; + res.bRightLeaf = false; + + return true; + } + + bool search_max( search_result& res ) const + { + internal_node * pParent; + internal_node * pGrandParent; + update_ptr updParent; + update_ptr updGrandParent; + bool bRightLeaf; + bool bRightParent = false; + + retry: + pParent = null_ptr< internal_node *>(); + pGrandParent = null_ptr(); + updParent = null_ptr(); + bRightLeaf = false; + tree_node * pLeaf = const_cast( &m_Root ); + while ( pLeaf->is_internal() ) { + res.guards.copy( search_result::Guard_GrandParent, search_result::Guard_Parent ); + pGrandParent = pParent; + res.guards.copy( search_result::Guard_Parent, search_result::Guard_Leaf ); + pParent = static_cast( pLeaf ); + bRightParent = bRightLeaf; + res.guards.copy( search_result::Guard_updGrandParent, search_result::Guard_updParent ); + updGrandParent = updParent; + + updParent = search_protect_update( res, pParent->m_pUpdate ); + + switch ( updParent.bits() ) { + case update_desc::DFlag: + case update_desc::Mark: + m_Stat.onSearchRetry(); + goto retry; + } + + bRightLeaf = !pParent->infinite_key(); + pLeaf = protect_child_node( res, pParent, bRightLeaf, updParent ); + if ( !pLeaf ) { + m_Stat.onSearchRetry(); + goto retry; + } + } + + if ( pLeaf->infinite_key()) + return false; + + res.pGrandParent = pGrandParent; + res.pParent = pParent; + assert( pLeaf->is_leaf() ); + res.pLeaf = static_cast( pLeaf ); + res.updParent = updParent; + res.updGrandParent = updGrandParent; + res.bRightParent = bRightParent; + res.bRightLeaf = bRightLeaf; + + return true; + } + + void help( update_ptr pUpdate ) + { + // pUpdate must be guarded! + switch ( pUpdate.bits() ) { + case update_desc::IFlag: + help_insert( pUpdate.ptr() ); + m_Stat.onHelpInsert(); + break; + case update_desc::DFlag: + help_delete( pUpdate.ptr() ); + m_Stat.onHelpDelete(); + break; + case update_desc::Mark: + //m_Stat.onHelpMark(); + //help_marked( pUpdate.ptr() ); + break; + } + } + + void help_insert( update_desc * pOp ) + { + // pOp must be guarded + + tree_node * pLeaf = static_cast( pOp->iInfo.pLeaf ); + if ( pOp->iInfo.bRightLeaf ) { + CDS_VERIFY( pOp->iInfo.pParent->m_pRight.compare_exchange_strong( pLeaf, static_cast( pOp->iInfo.pNew ), + memory_model::memory_order_relaxed, CDS_ATOMIC::memory_order_relaxed )); + } + else { + CDS_VERIFY( pOp->iInfo.pParent->m_pLeft.compare_exchange_strong( pLeaf, static_cast( pOp->iInfo.pNew ), + memory_model::memory_order_relaxed, CDS_ATOMIC::memory_order_relaxed )); + } + + // Unflag parent + update_ptr cur( pOp, update_desc::IFlag ); + CDS_VERIFY( pOp->iInfo.pParent->m_pUpdate.compare_exchange_strong( cur, pOp->iInfo.pParent->null_update_desc(), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } + + bool check_delete_precondition( search_result& res ) const + { + // precondition: all member of res must be guarded + + assert( res.pGrandParent != null_ptr() ); + + return + static_cast( + res.bRightParent + ? res.pGrandParent->m_pRight.load(memory_model::memory_order_relaxed) + : res.pGrandParent->m_pLeft.load(memory_model::memory_order_relaxed) + ) == res.pParent + && + static_cast( + res.bRightLeaf + ? 
res.pParent->m_pRight.load(memory_model::memory_order_relaxed) + : res.pParent->m_pLeft.load(memory_model::memory_order_relaxed) + ) == res.pLeaf; + } + + bool help_delete( update_desc * pOp ) + { + // precondition: pOp must be guarded + + update_ptr pUpdate( pOp->dInfo.pUpdateParent ); + update_ptr pMark( pOp, update_desc::Mark ); + if ( pOp->dInfo.pParent->m_pUpdate.compare_exchange_strong( pUpdate, pMark, // * + memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + { + help_marked( pOp ); + + retire_node( pOp->dInfo.pParent ); + retire_node( pOp->dInfo.pLeaf ); + retire_update_desc( pOp ); + return true; + } + else if ( pUpdate == pMark ) { + // some other thread is processing help_marked() + help_marked( pOp ); + m_Stat.onHelpMark(); + return true; + } + else { + // Undo grandparent dInfo + update_ptr pDel( pOp, update_desc::DFlag ); + if ( pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( pDel, pOp->dInfo.pGrandParent->null_update_desc(), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + { + retire_update_desc( pOp ); + } + return false; + } + } + + tree_node * protect_sibling( typename gc::Guard& guard, CDS_ATOMIC::atomic& sibling ) + { + typename gc::Guard guardLeaf; + + tree_node * pSibling; + tree_node * p = sibling.load( memory_model::memory_order_relaxed ); + do { + pSibling = p; + guard.assign( static_cast(p) ); + guardLeaf.assign( node_traits::to_value_ptr( static_cast(p))); + } while ( pSibling != ( p = sibling.load( memory_model::memory_order_acquire )) ); + + if ( pSibling->is_leaf() ) + guard.copy( guardLeaf ); + + return pSibling; + } + + void help_marked( update_desc * pOp ) + { + // precondition: pOp must be guarded + + tree_node * pParent = pOp->dInfo.pParent; + + typename gc::Guard guard; + tree_node * pOpposite = protect_sibling( guard, pOp->dInfo.bRightLeaf ? pOp->dInfo.pParent->m_pLeft : pOp->dInfo.pParent->m_pRight ); + + if ( pOp->dInfo.bRightParent ) { + CDS_VERIFY( pOp->dInfo.pGrandParent->m_pRight.compare_exchange_strong( pParent, pOpposite, + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } + else { + CDS_VERIFY( pOp->dInfo.pGrandParent->m_pLeft.compare_exchange_strong( pParent, pOpposite, + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } + + update_ptr upd( pOp, update_desc::DFlag ); + CDS_VERIFY( pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( upd, pOp->dInfo.pGrandParent->null_update_desc(), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } + + bool try_insert( value_type& val, internal_node * pNewInternal, search_result& res ) + { + assert( res.updParent.bits() == update_desc::Clean ); + assert( res.pLeaf->is_leaf() ); + + // check search result + if ( ( res.bRightLeaf + ? 
res.pParent->m_pRight.load( memory_model::memory_order_acquire ) + : res.pParent->m_pLeft.load( memory_model::memory_order_acquire ) ) == res.pLeaf ) + { + leaf_node * pNewLeaf = node_traits::to_node_ptr( val ); + + int nCmp = node_compare()( val, *res.pLeaf ); + if ( nCmp < 0 ) { + if ( res.pGrandParent ) { + assert( !res.pLeaf->infinite_key() ); + pNewInternal->infinite_key( 0 ); + key_extractor()( pNewInternal->m_Key, *node_traits::to_value_ptr( res.pLeaf ) ); + } + else { + assert( res.pLeaf->infinite_key() == tree_node::key_infinite1 ); + pNewInternal->infinite_key( 1 ); + } + pNewInternal->m_pLeft.store( static_cast(pNewLeaf), memory_model::memory_order_relaxed ); + pNewInternal->m_pRight.store( static_cast(res.pLeaf), memory_model::memory_order_release ); + } + else { + assert( !res.pLeaf->is_internal() ); + pNewInternal->infinite_key( 0 ); + + key_extractor()( pNewInternal->m_Key, val ); + pNewInternal->m_pLeft.store( static_cast(res.pLeaf), memory_model::memory_order_relaxed ); + pNewInternal->m_pRight.store( static_cast(pNewLeaf), memory_model::memory_order_release ); + assert( !res.pLeaf->infinite_key()); + } + + typename gc::Guard guard; + update_desc * pOp = alloc_update_desc(); + guard.assign( pOp ); + + pOp->iInfo.pParent = res.pParent; + pOp->iInfo.pNew = pNewInternal; + pOp->iInfo.pLeaf = res.pLeaf; + pOp->iInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updCur( res.updParent.ptr() ); + if ( res.pParent->m_pUpdate.compare_exchange_strong( updCur, update_ptr( pOp, update_desc::IFlag ), + memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + { + // do insert + help_insert( pOp ); + retire_update_desc( pOp ); + return true; + } + else { + m_Stat.onUpdateDescDeleted(); + free_update_desc( pOp ); + } + } + return false; + } + + template + bool erase_( Q const& val, Compare cmp, Equal eq, Func f ) + { + update_desc * pOp = null_ptr(); + search_result res; + + for ( ;; ) { + if ( !search( res, val, cmp ) || !eq( val, *res.pLeaf ) ) { + if ( pOp ) + retire_update_desc( pOp ); + m_Stat.onEraseFailed(); + return false; + } + + if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { + if ( !pOp ) + pOp = alloc_update_desc(); + if ( check_delete_precondition( res ) ) { + typename gc::Guard guard; + guard.assign( pOp ); + + pOp->dInfo.pGrandParent = res.pGrandParent; + pOp->dInfo.pParent = res.pParent; + pOp->dInfo.pLeaf = res.pLeaf; + pOp->dInfo.pUpdateParent = res.updParent.ptr(); + pOp->dInfo.bRightParent = res.bRightParent; + pOp->dInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updGP( res.updGrandParent.ptr() ); + if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), + memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + { + if ( help_delete( pOp )) { + // res.pLeaf is not deleted yet since it is guarded + cds::unref(f)( *node_traits::to_value_ptr( res.pLeaf )); + break; + } + pOp = null_ptr(); + } + } + } + + m_Stat.onEraseRetry(); + } + + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return true; + } + + template + bool extract_( typename gc::Guard& guard, Q const& key ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_( key, node_compare(), + []( Q const&, leaf_node const& ) -> bool { return true; }, + [&guard]( value_type& found ) { guard.assign( &found ); } ); +# else + assign_guard_functor f( guard ); + return erase_( key, node_compare(), trivial_equal_functor(), cds::ref(f) ); +# endif + } + + template + bool extract_with_( typename 
gc::Guard& guard, Q const& key, Less pred ) + { + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_( key, compare_functor(), + []( Q const&, leaf_node const& ) -> bool { return true; }, + [&guard]( value_type& found ) { guard.assign( &found ); } ); +# else + assign_guard_functor f( guard ); + return erase_( key, compare_functor(), trivial_equal_functor(), cds::ref(f) ); +# endif + } + + bool extract_max_( typename gc::Guard& guard ) + { + update_desc * pOp = null_ptr(); + search_result res; + + for ( ;; ) { + if ( !search_max( res )) { + // Tree is empty + if ( pOp ) + retire_update_desc( pOp ); + m_Stat.onExtractMaxFailed(); + return false; + } + + if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { + if ( !pOp ) + pOp = alloc_update_desc(); + if ( check_delete_precondition( res ) ) { + typename gc::Guard guard; + guard.assign( pOp ); + + pOp->dInfo.pGrandParent = res.pGrandParent; + pOp->dInfo.pParent = res.pParent; + pOp->dInfo.pLeaf = res.pLeaf; + pOp->dInfo.pUpdateParent = res.updParent.ptr(); + pOp->dInfo.bRightParent = res.bRightParent; + pOp->dInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updGP( res.updGrandParent.ptr() ); + if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), + memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + { + if ( help_delete( pOp )) + break; + pOp = null_ptr(); + } + } + } + + m_Stat.onExtractMaxRetry(); + } + + --m_ItemCounter; + m_Stat.onExtractMaxSuccess(); + guard.assign( node_traits::to_value_ptr( res.pLeaf ) ); + return true; + } + + bool extract_min_( typename gc::Guard& guard ) + { + update_desc * pOp = null_ptr(); + search_result res; + + for ( ;; ) { + if ( !search_min( res )) { + // Tree is empty + if ( pOp ) + retire_update_desc( pOp ); + m_Stat.onExtractMinFailed(); + return false; + } + + if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { + if ( !pOp ) + pOp = alloc_update_desc(); + if ( check_delete_precondition( res ) ) { + typename gc::Guard guard; + guard.assign( pOp ); + + pOp->dInfo.pGrandParent = res.pGrandParent; + pOp->dInfo.pParent = res.pParent; + pOp->dInfo.pLeaf = res.pLeaf; + pOp->dInfo.pUpdateParent = res.updParent.ptr(); + pOp->dInfo.bRightParent = res.bRightParent; + pOp->dInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updGP( res.updGrandParent.ptr() ); + if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), + memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + { + if ( help_delete( pOp )) + break; + pOp = null_ptr(); + } + } + } + + m_Stat.onExtractMinRetry(); + } + + --m_ItemCounter; + m_Stat.onExtractMinSuccess(); + guard.assign( node_traits::to_value_ptr( res.pLeaf )); + return true; + } + + template + bool find_( Q& val, Func f ) const + { + search_result res; + if ( search( res, val, node_compare() )) { + assert( res.pLeaf ); + cds::unref(f)( *node_traits::to_value_ptr( res.pLeaf ), val ); + + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + + template + bool find_with_( Q& val, Less pred, Func f ) const + { + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + + search_result res; + if ( 
search( res, val, compare_functor() )) { + assert( res.pLeaf ); + cds::unref(f)( *node_traits::to_value_ptr( res.pLeaf ), val ); + + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + + template + bool get_( typename gc::Guard& guard, Q const& val ) const + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return find_( val, [&guard]( value_type& found, Q const& ) { guard.assign( &found ); } ); +# else + assign_guard_functor f(guard); + return find_( val, cds::ref(f) ); +# endif + } + + template + bool get_with_( typename gc::Guard& guard, Q const& val, Less pred ) const + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return find_with_( val, pred, [&guard]( value_type& found, Q const& ) { guard.assign( &found ); } ); +# else + assign_guard_functor f(guard); + return find_with_( val, pred, cds::ref(f) ); +# endif + } + + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_ELLEN_BINTREE_IMPL_H diff --git a/cds/intrusive/ellen_bintree_ptb.h b/cds/intrusive/ellen_bintree_ptb.h new file mode 100644 index 00000000..3ed45f24 --- /dev/null +++ b/cds/intrusive/ellen_bintree_ptb.h @@ -0,0 +1,9 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_ELLEN_BINTREE_PTB_H +#define __CDS_INTRUSIVE_ELLEN_BINTREE_PTB_H + +#include +#include + +#endif // #ifndef __CDS_INTRUSIVE_ELLEN_BINTREE_PTB_H diff --git a/cds/intrusive/ellen_bintree_rcu.h b/cds/intrusive/ellen_bintree_rcu.h new file mode 100644 index 00000000..52ba3ae2 --- /dev/null +++ b/cds/intrusive/ellen_bintree_rcu.h @@ -0,0 +1,2071 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_ELLEN_BINTREE_RCU_H +#define __CDS_INTRUSIVE_ELLEN_BINTREE_RCU_H + +#include +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + //@cond + namespace ellen_bintree { + + template + struct base_node >: public basic_node + { + typedef basic_node base_class; + + base_node * m_pNextRetired; + + typedef cds::urcu::gc gc ; ///< Garbage collector + + /// Constructs leaf (bIntrenal == false) or internal (bInternal == true) node + explicit base_node( bool bInternal ) + : basic_node( bInternal ? internal : 0 ) + , m_pNextRetired( null_ptr() ) + {} + }; + + } // namespace ellen_bintree + //@endcond + + /// Ellen's et al binary search tree (RCU specialization) + /** @ingroup cds_intrusive_map + @ingroup cds_intrusive_tree + @anchor cds_intrusive_EllenBinTree_rcu + + Source: + - [2010] F.Ellen, P.Fatourou, E.Ruppert, F.van Breugel "Non-blocking Binary Search Tree" + + %EllenBinTree is an unbalanced leaf-oriented binary search tree that implements the set + abstract data type. Nodes maintains child pointers but not parent pointers. + Every internal node has exactly two children, and all data of type \p T currently in + the tree are stored in the leaves. Internal nodes of the tree are used to direct \p find + operation along the path to the correct leaf. The keys (of \p Key type) stored in internal nodes + may or may not be in the set. \p Key type is a subset of \p T type. + There should be exactly defined a key extracting functor for converting object of type \p T to + object of type \p Key. + + Due to \p extract_min and \p extract_max member functions the \p %EllenBinTree can act as + a priority queue. In this case you should provide unique compound key, for example, + the priority value plus some uniformly distributed random value. + + @warning Recall the tree is unbalanced. 
The complexity of operations is O(log N) + for uniformly distributed random keys, but in worst case the complexity is O(N). + + @note In the current implementation we do not use helping technique described in original paper. + So, the current implementation is near to fine-grained lock-based tree. + Helping will be implemented in future release + + Template arguments : + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p Key - key type, a subset of \p T + - \p T - type to be stored in tree's leaf nodes. The type must be based on ellen_bintree::node + (for ellen_bintree::base_hook) or it must have a member of type ellen_bintree::node + (for ellen_bintree::member_hook). + - \p Traits - type traits. See ellen_bintree::type_traits for explanation. + + It is possible to declare option-based tree with cds::intrusive::ellen_bintree::make_traits metafunction + instead of \p Traits template argument. + Template argument list \p Options of cds::intrusive::ellen_bintree::make_traits metafunction are: + - opt::hook - hook used. Possible values are: ellen_bintree::base_hook, ellen_bintree::member_hook, ellen_bintree::traits_hook. + If the option is not specified, ellen_bintree::base_hook<> is used. + - ellen_bintree::key_extractor - key extracting functor, mandatory option. The functor has the following prototype: + \code + struct key_extractor { + void operator ()( Key& dest, T const& src ); + }; + \endcode + It should initialize \p dest key from \p src data. The functor is used to initialize internal nodes. + - opt::compare - key compare functor. No default functor is provided. + If the option is not specified, \p %opt::less is used. + - opt::less - specifies binary predicate used for key compare. At least \p %opt::compare or \p %opt::less should be defined. + - opt::disposer - the functor used for dispose removed nodes. Default is opt::v::empty_disposer. Due the nature + of GC schema the disposer may be called asynchronously. The disposer is used only for leaf nodes. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - ellen_bintree::update_desc_allocator - an allocator of \ref ellen_bintree::update_desc "update descriptors", + default is CDS_DEFAULT_ALLOCATOR. + Note that update descriptor is helping data structure with short lifetime and it is good candidate for pooling. + The number of simultaneously existing descriptors is bounded and depends on the number of threads + working with the tree and RCU buffer size (if RCU is buffered). + Therefore, a bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue is good candidate + for the free-list of update descriptors, see cds::memory::vyukov_queue_pool free-list implementation. + Also notice that size of update descriptor is constant and not dependent on the type of data + stored in the tree so single free-list object can be used for all \p %EllenBinTree objects. + - opt::node_allocator - the allocator used for internal nodes. Default is \ref CDS_DEFAULT_ALLOCATOR. + - opt::stat - internal statistics. Available types: ellen_bintree::stat, ellen_bintree::empty_stat (the default) + - opt::rcu_check_deadlock - a deadlock checking policy. 
Default is opt::v::rcu_throw_deadlock
+
+        @anchor cds_intrusive_EllenBinTree_rcu_less
+        Predicate requirements
+
+        opt::less, opt::compare and other predicates used with member functions should accept at least parameters
+        of type \p T and \p Key in any combination.
+        For example, for a \p Foo struct with a \p std::string key field an appropriate \p less functor is:
+        \code
+        struct Foo: public cds::intrusive::ellen_bintree::node< ... >
+        {
+            std::string m_strKey;
+            ...
+        };
+
+        struct less {
+            bool operator()( Foo const& v1, Foo const& v2 ) const
+            { return v1.m_strKey < v2.m_strKey ; }
+
+            bool operator()( Foo const& v, std::string const& s ) const
+            { return v.m_strKey < s ; }
+
+            bool operator()( std::string const& s, Foo const& v ) const
+            { return s < v.m_strKey ; }
+
+            // Support comparing std::string and char const *
+            bool operator()( std::string const& s, char const * p ) const
+            { return s.compare(p) < 0 ; }
+
+            bool operator()( Foo const& v, char const * p ) const
+            { return v.m_strKey.compare(p) < 0 ; }
+
+            bool operator()( char const * p, std::string const& s ) const
+            { return s.compare(p) > 0; }
+
+            bool operator()( char const * p, Foo const& v ) const
+            { return v.m_strKey.compare(p) > 0; }
+        };
+        \endcode
+
+        @note Before including this header you should include the appropriate RCU header file,
+        see \ref cds_urcu_gc "RCU type" for the list of existing RCU classes and their corresponding header files.
+
+        Usage
+
+        Suppose we have the following \p Foo struct with a string key type:
+        \code
+        struct Foo {
+            std::string m_strKey ;  // The key
+            //...                   // other non-key data
+        };
+        \endcode
+
+        We want to utilize the RCU-based \p %cds::intrusive::EllenBinTree set for \p Foo data.
+        We may use a base hook or a member hook. Consider the base hook variant.
+        First, we need to derive the \p Foo struct from \p cds::intrusive::ellen_bintree::node:
+        \code
+        #include
+        #include
+
+        // RCU type we use
+        typedef cds::urcu::gc< cds::urcu::general_buffered<> >  gpb_rcu;
+
+        struct Foo: public cds::intrusive::ellen_bintree::node< gpb_rcu >
+        {
+            std::string m_strKey ;  // The key
+            //...                   // other non-key data
+        };
+        \endcode
+
+        Second, we need to implement auxiliary structures and functors:
+        - a key extractor functor for extracting the key from a \p Foo object.
+          Such a functor is necessary because the tree's internal nodes store the keys.
+        - a \p less predicate. We want our set to accept \p std::string
+          and char const * parameters for searching, so our \p less
+          predicate should be non-trivial, see below.
+        - item counting feature: we want our set's \p size() member function
+          to return the actual item count.
+ + \code + // Key extractor functor + struct my_key_extractor + { + void operator ()( std::string& key, Foo const& src ) const + { + key = src.m_strKey; + } + }; + + // Less predicate + struct my_less { + bool operator()( Foo const& v1, Foo const& v2 ) const + { return v1.m_strKey < v2.m_strKey ; } + + bool operator()( Foo const& v, std::string const& s ) const + { return v.m_strKey < s ; } + + bool operator()( std::string const& s, Foo const& v ) const + { return s < v.m_strKey ; } + + // Support comparing std::string and char const * + bool operator()( std::string const& s, char const * p ) const + { return s.compare(p) < 0 ; } + + bool operator()( Foo const& v, char const * p ) const + { return v.m_strKey.compare(p) < 0 ; } + + bool operator()( char const * p, std::string const& s ) const + { return s.compare(p) > 0; } + + bool operator()( char const * p, Foo const& v ) const + { return v.m_strKey.compare(p) > 0; } + }; + + // Type traits for our set + // It is necessary to specify only those typedefs that differ from + // cds::intrusive::ellen_bintree::type_traits defaults. + struct set_traits: public cds::intrusive::ellen_bintree::type_traits + { + typedef cds::intrusive::ellen_bintree::base_hook< cds::opt::gc > > hook; + typedef my_key_extractor key_extractor; + typedef my_less less; + typedef cds::atomicity::item_counter item_counter; + }; + \endcode + + Now we declare \p %EllenBinTree set and use it: + \code + typedef cds::intrusive::EllenBinTree< gpb_rcu, std::string, Foo, set_traits > set_type; + + set_type theSet; + // ... + \endcode + + Instead of declaring \p set_traits type traits we can use option-based syntax with \p %make_traits metafunction, + for example: + \code + typedef cds::intrusive::EllenBinTree< gpb_rcu, std::string, Foo, + typename cds::intrusive::ellen_bintree::make_traits< + cds::opt::hook< cds::intrusive::ellen_bintree::base_hook< cds::opt::gc > > + ,cds::intrusive::ellen_bintree::key_extractor< my_key_extractor > + ,cds::opt::less< my_less > + ,cds::opt::item_counter< cds::atomicity::item_counter > + >::type + > set_type2; + \endcode + + Functionally, \p set_type and \p set_type2 are equivalent. + + Member-hooked tree + + Sometimes, we cannot use base hook, for example, when the \p Foo structure is external. + In such case we can use member hook feature. + \code + #include + #include + + // Struct Foo is external and its declaration cannot be modified. + struct Foo { + std::string m_strKey ; // The key + //... 
// other non-key data
+        };
+
+        // RCU type we use
+        typedef cds::urcu::gc< cds::urcu::general_buffered<> >  gpb_rcu;
+
+        // Foo wrapper
+        struct MyFoo
+        {
+            Foo     m_foo;
+            cds::intrusive::ellen_bintree::node< gpb_rcu >  set_hook ;  // member hook
+        };
+
+        // Key extractor functor
+        struct member_key_extractor
+        {
+            void operator ()( std::string& key, MyFoo const& src ) const
+            {
+                key = src.m_foo.m_strKey;
+            }
+        };
+
+        // Less predicate
+        struct member_less {
+            bool operator()( MyFoo const& v1, MyFoo const& v2 ) const
+            { return v1.m_foo.m_strKey < v2.m_foo.m_strKey ; }
+
+            bool operator()( MyFoo const& v, std::string const& s ) const
+            { return v.m_foo.m_strKey < s ; }
+
+            bool operator()( std::string const& s, MyFoo const& v ) const
+            { return s < v.m_foo.m_strKey ; }
+
+            // Support comparing std::string and char const *
+            bool operator()( std::string const& s, char const * p ) const
+            { return s.compare(p) < 0 ; }
+
+            bool operator()( MyFoo const& v, char const * p ) const
+            { return v.m_foo.m_strKey.compare(p) < 0 ; }
+
+            bool operator()( char const * p, std::string const& s ) const
+            { return s.compare(p) > 0; }
+
+            bool operator()( char const * p, MyFoo const& v ) const
+            { return v.m_foo.m_strKey.compare(p) > 0; }
+        };
+
+        // Type traits for our member-based set
+        struct member_set_traits: public cds::intrusive::ellen_bintree::type_traits
+        {
+            typedef cds::intrusive::ellen_bintree::member_hook< offsetof(MyFoo, set_hook), cds::opt::gc< gpb_rcu > > hook;
+            typedef member_key_extractor key_extractor;
+            typedef member_less less;
+            typedef cds::atomicity::item_counter item_counter;
+        };
+
+        // Tree containing MyFoo objects
+        typedef cds::intrusive::EllenBinTree< gpb_rcu, std::string, MyFoo, member_set_traits > member_set_type;
+
+        member_set_type theMemberSet;
+        \endcode
+
+        Multiple containers
+
+        Sometimes our \p Foo struct needs to be used in several different containers.
+        Suppose the \p Foo struct has two key fields:
+        \code
+        struct Foo {
+            std::string m_strKey ;  // string key
+            int         m_nKey ;    // int key
+            //...                   // other non-key data fields
+        };
+        \endcode
+
+        We want to build two intrusive \p %EllenBinTree sets: one indexed on the \p Foo::m_strKey field,
+        another indexed on the \p Foo::m_nKey field. To handle such a case we should use a tag option for
+        the tree's hook:
+        \code
+        #include
+        #include
+
+        // RCU type we use
+        typedef cds::urcu::gc< cds::urcu::general_buffered<> >  gpb_rcu;
+
+        // Declare tag structs
+        struct int_tag ;        // int key tag
+        struct string_tag ;     // string key tag
+
+        // Foo struct is derived from two ellen_bintree::node classes
+        // with different tags
+        struct Foo
+            : public cds::intrusive::ellen_bintree::node< gpb_rcu, cds::opt::tag< string_tag > >
+            , public cds::intrusive::ellen_bintree::node< gpb_rcu, cds::opt::tag< int_tag > >
+        {
+            std::string m_strKey ;  // string key
+            int         m_nKey ;    // int key
+            //...
// other non-key data fields + }; + + // String key extractor functor + struct string_key_extractor + { + void operator ()( std::string& key, Foo const& src ) const + { + key = src.m_strKey; + } + }; + + // Int key extractor functor + struct int_key_extractor + { + void operator ()( int& key, Foo const& src ) const + { + key = src.m_nKey; + } + }; + + // String less predicate + struct string_less { + bool operator()( Foo const& v1, Foo const& v2 ) const + { return v1.m_strKey < v2.m_strKey ; } + + bool operator()( Foo const& v, std::string const& s ) const + { return v.m_strKey < s ; } + + bool operator()( std::string const& s, Foo const& v ) const + { return s < v.m_strKey ; } + + // Support comparing std::string and char const * + bool operator()( std::string const& s, char const * p ) const + { return s.compare(p) < 0 ; } + + bool operator()( Foo const& v, char const * p ) const + { return v.m_strKey.compare(p) < 0 ; } + + bool operator()( char const * p, std::string const& s ) const + { return s.compare(p) > 0; } + + bool operator()( char const * p, Foo const& v ) const + { return v.m_strKey.compare(p) > 0; } + }; + + // Int less predicate + struct int_less { + bool operator()( Foo const& v1, Foo const& v2 ) const + { return v1.m_nKey < v2.m_nKey ; } + + bool operator()( Foo const& v, int n ) const + { return v.m_nKey < n ; } + + bool operator()( int n, Foo const& v ) const + { return n < v.m_nKey ; } + }; + + // Type traits for string-indexed set + struct string_set_traits: public cds::intrusive::ellen_bintree::type_traits + { + typedef cds::intrusive::ellen_bintree::base_hook< cds::opt::gc >, cds::opt::tag< string_tag > > hook; + typedef string_key_extractor key_extractor; + typedef string_less less; + typedef cds::atomicity::item_counter item_counter; + }; + + // Type traits for int-indexed set + struct int_set_traits: public cds::intrusive::ellen_bintree::type_traits + { + typedef cds::intrusive::ellen_bintree::base_hook< cds::opt::gc >, cds::opt::tag< int_tag > > hook; + typedef int_key_extractor key_extractor; + typedef int_less less; + typedef cds::atomicity::item_counter item_counter; + }; + + // Declare string-indexed set + typedef cds::intrusive::EllenBinTree< gpb_rcu, std::string, Foo, string_set_traits > string_set_type; + string_set_type theStringSet; + + // Declare int-indexed set + typedef cds::intrusive::EllenBinTree< gpb_rcu, int, Foo, int_set_traits > int_set_type; + int_set_type theIntSet; + + // Now we can use theStringSet and theIntSet in our program + // ... 
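+        // A minimal usage sketch (illustrative only; assumes the Foo object is
+        // allocated and owned by the caller, and the field values are arbitrary).
+        // The same object is linked into both trees via its two differently-tagged hooks:
+        //   Foo * p = new Foo;
+        //   p->m_strKey = "sample";
+        //   p->m_nKey = 42;
+        //   theStringSet.insert( *p );
+        //   theIntSet.insert( *p );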
+ \endcode + */ + template < class RCU, + typename Key, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = ellen_bintree::type_traits +#else + class Traits +#endif + > + class EllenBinTree< cds::urcu::gc, Key, T, Traits > + { + public: + typedef cds::urcu::gc gc ; ///< RCU Garbage collector + typedef Key key_type ; ///< type of a key stored in internal nodes; key is a part of \p value_type + typedef T value_type ; ///< type of value stored in the binary tree + typedef Traits options ; ///< Traits template parameter + + typedef typename options::hook hook ; ///< hook type + typedef typename hook::node_type node_type ; ///< node type + + typedef typename options::disposer disposer ; ///< leaf node disposer + + protected: + //@cond + typedef ellen_bintree::base_node< gc > tree_node ; ///< Base type of tree node + typedef node_type leaf_node ; ///< Leaf node type + typedef ellen_bintree::internal_node< key_type, leaf_node > internal_node ; ///< Internal node type + typedef ellen_bintree::update_desc< leaf_node, internal_node> update_desc ; ///< Update descriptor + typedef typename update_desc::update_ptr update_ptr ; ///< Marked pointer to update descriptor + //@endcond + + public: + typedef cds::urcu::exempt_ptr< gc, value_type, value_type, disposer, void > exempt_ptr ; ///< pointer to extracted node + + public: +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key compare functor based on opt::compare and opt::less option setter. + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< Node traits +# else + typedef typename opt::details::make_comparator< value_type, options >::type key_comparator; + struct node_traits: public get_node_traits< value_type, node_type, hook>::type + { + static internal_node const& to_internal_node( tree_node const& n ) + { + assert( n.is_internal() ); + return static_cast( n ); + } + + static leaf_node const& to_leaf_node( tree_node const& n ) + { + assert( n.is_leaf() ); + return static_cast( n ); + } + }; +# endif + + typedef typename options::item_counter item_counter; ///< Item counting policy used + typedef typename options::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename options::stat stat ; ///< internal statistics type + typedef typename options::rcu_check_deadlock rcu_check_deadlock ; ///< Deadlock checking policy + typedef typename options::key_extractor key_extractor ; ///< key extracting functor + + typedef typename options::node_allocator node_allocator ; ///< Internal node allocator + typedef typename options::update_desc_allocator update_desc_allocator ; ///< Update descriptor allocator + + typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock + + static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions do not require external locking + + protected: + //@cond + typedef ellen_bintree::details::compare< key_type, value_type, key_comparator, node_traits > node_compare; + + typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy; + + typedef cds::details::Allocator< internal_node, node_allocator > cxx_node_allocator; + typedef cds::details::Allocator< update_desc, update_desc_allocator > cxx_update_desc_allocator; + + struct search_result { + internal_node * pGrandParent; + internal_node * pParent; + leaf_node * pLeaf; + update_ptr updParent; + update_ptr updGrandParent; + bool bRightLeaf ; // true if pLeaf is right child of pParent, false otherwise + bool bRightParent ; // true if pParent is right child of pGrandParent, false otherwise + + search_result() + :pGrandParent( null_ptr() ) + ,pParent( null_ptr() ) + ,pLeaf( null_ptr() ) + ,bRightLeaf( false ) + ,bRightParent( false ) + {} + }; + //@endcond + + protected: + //@cond + internal_node m_Root ; ///< Tree root node (key= Infinite2) + leaf_node m_LeafInf1; + leaf_node m_LeafInf2; + //@endcond + + item_counter m_ItemCounter ; ///< item counter + mutable stat m_Stat ; ///< internal statistics + + protected: + //@cond + static void free_leaf_node( value_type * p ) + { + disposer()( p ); + } + + internal_node * alloc_internal_node() const + { + m_Stat.onInternalNodeCreated(); + internal_node * pNode = cxx_node_allocator().New(); + //pNode->clean(); + return pNode; + } + + static void free_internal_node( internal_node * pNode ) + { + cxx_node_allocator().Delete( pNode ); + } + + struct internal_node_deleter { + void operator()( internal_node * p) const + { + free_internal_node( p ); + } + }; + + typedef std::unique_ptr< internal_node, internal_node_deleter> unique_internal_node_ptr; + + update_desc * alloc_update_desc() const + { + m_Stat.onUpdateDescCreated(); + return cxx_update_desc_allocator().New(); + } + + static void free_update_desc( update_desc * pDesc ) + { + cxx_update_desc_allocator().Delete( pDesc ); + } + + class retired_list + { + update_desc * pUpdateHead; + tree_node * pNodeHead; + + private: + class forward_iterator + { + update_desc * m_pUpdate; + tree_node * m_pNode; + + public: + forward_iterator( retired_list const& l ) + : m_pUpdate( l.pUpdateHead ) + , m_pNode( l.pNodeHead ) + {} + + forward_iterator() + : m_pUpdate( null_ptr() ) + , m_pNode( null_ptr< tree_node *>() ) + {} + + cds::urcu::retired_ptr operator *() + { + if ( m_pUpdate ) { + return cds::urcu::retired_ptr( reinterpret_cast( m_pUpdate ), + reinterpret_cast( free_update_desc ) ); + } + if ( m_pNode ) { + if ( m_pNode->is_leaf() ) { + return cds::urcu::retired_ptr( reinterpret_cast( node_traits::to_value_ptr( static_cast( m_pNode ))), + reinterpret_cast< cds::urcu::free_retired_ptr_func>( free_leaf_node ) ); + } + else { + return cds::urcu::retired_ptr( 
reinterpret_cast( static_cast( m_pNode ) ), + reinterpret_cast( free_internal_node ) ); + } + } + return cds::urcu::retired_ptr( null_ptr(), + reinterpret_cast( free_update_desc ) ); + } + + void operator ++() + { + if ( m_pUpdate ) { + m_pUpdate = m_pUpdate->pNextRetire; + return; + } + if ( m_pNode ) + m_pNode = m_pNode->m_pNextRetired; + } + + friend bool operator ==( forward_iterator const& i1, forward_iterator const& i2 ) + { + return i1.m_pUpdate == i2.m_pUpdate && i1.m_pNode == i2.m_pNode; + } + friend bool operator !=( forward_iterator const& i1, forward_iterator const& i2 ) + { + return !( i1 == i2 ); + } + }; + + public: + retired_list() + : pUpdateHead( null_ptr() ) + , pNodeHead( null_ptr() ) + {} + + ~retired_list() + { + gc::batch_retire( forward_iterator(*this), forward_iterator() ); + } + + void push( update_desc * p ) + { + p->pNextRetire = pUpdateHead; + pUpdateHead = p; + } + + void push( tree_node * p ) + { + p->m_pNextRetired = pNodeHead; + pNodeHead = p; + } + }; + + void retire_node( tree_node * pNode, retired_list& rl ) const + { + if ( pNode->is_leaf() ) { + assert( static_cast( pNode ) != &m_LeafInf1 ); + assert( static_cast( pNode ) != &m_LeafInf2 ); + } + else { + assert( static_cast( pNode ) != &m_Root ); + m_Stat.onInternalNodeDeleted(); + } + rl.push( pNode ); + } + + void retire_update_desc( update_desc * p, retired_list& rl, bool bDirect ) const + { + m_Stat.onUpdateDescDeleted(); + if ( bDirect ) + free_update_desc( p ); + else + rl.push( p ); + } + + void make_empty_tree() + { + m_Root.infinite_key( 2 ); + m_LeafInf1.infinite_key( 1 ); + m_LeafInf2.infinite_key( 2 ); + m_Root.m_pLeft.store( &m_LeafInf1, memory_model::memory_order_relaxed ); + m_Root.m_pRight.store( &m_LeafInf2, memory_model::memory_order_release ); + } + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct trivial_equal_functor { + template + bool operator()( Q const& , leaf_node const& ) const + { + return true; + } + }; + + struct empty_insert_functor { + void operator()( value_type& ) + {} + }; +# endif +# if !defined(CDS_CXX11_LAMBDA_SUPPORT) || (CDS_COMPILER == CDS_COMPILER_MSVC && CDS_COMPILER_VERSION == CDS_COMPILER_MSVC10) + struct unlink_equal_functor { + bool operator()( value_type const& v, leaf_node const& n ) const + { + return &v == node_traits::to_value_ptr( n ); + } + }; + struct empty_erase_functor { + void operator()( value_type const& ) + {} + }; +# endif + //@endcond + + public: + /// Default constructor + EllenBinTree() + { + static_assert( (!std::is_same< key_extractor, opt::none >::value), "The key extractor option must be specified" ); + make_empty_tree(); + } + + /// Clears the tree + ~EllenBinTree() + { + unsafe_clear(); + } + + /// Inserts new node + /** + The function inserts \p val in the tree if it does not contain + an item with key equal to \p val. + + The function applies RCU lock internally. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return insert( val, []( value_type& ) {} ); +# else + return insert( val, empty_insert_functor() ); +# endif + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the tree + - if inserting is success, calls \p f functor to initialize value-field of \p val. 
+ + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this tree's item by concurrent threads. + The user-defined functor is called only if the inserting is success and may be passed by reference + using boost::ref + + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool insert( value_type& val, Func f ) + { + check_deadlock_policy::check(); + + unique_internal_node_ptr pNewInternal; + retired_list updRetire; + + { + rcu_lock l; + + search_result res; + for ( ;; ) { + if ( search( res, val, node_compare() )) { + if ( pNewInternal.get() ) + m_Stat.onInternalNodeDeleted() ; // unique_internal_node_ptr deletes internal node + m_Stat.onInsertFailed(); + return false; + } + + if ( res.updParent.bits() != update_desc::Clean ) + help( res.updParent, updRetire ); + else { + if ( !pNewInternal.get() ) + pNewInternal.reset( alloc_internal_node() ); + + if ( try_insert( val, pNewInternal.get(), res, updRetire )) { + cds::unref(f)( val ); + pNewInternal.release() ; // internal node is linked into the tree and should not be deleted + break; + } + } + + m_Stat.onInsertRetry(); + } + } + + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + + return true; + } + + /// Ensures that the \p val exists in the tree + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the tree, then \p val is inserted into the tree. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the tree + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. + + The functor can change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You can pass \p func argument by value or by reference using boost::ref or cds::ref. + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns std::pair where \p first is \p true if operation is successfull, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the tree. 
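+            A minimal usage sketch is shown below; the \p update_functor, the tree typedef \p tree_type and
+            the object \p v are illustrative assumptions, not part of the library:
+            \code
+            struct update_functor {
+                void operator()( bool bNew, value_type& item, value_type& val )
+                {
+                    // if bNew is false, item is the element already stored in the tree
+                    // and its non-key fields may be updated here from val
+                }
+            };
+
+            tree_type  theTree;
+            value_type v;
+            std::pair<bool, bool> res = theTree.ensure( v, update_functor() );
+            // res.second is true if v has been inserted,
+            // false if an equal item already existed in the tree
+            \endcode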
+ */ + template + std::pair ensure( value_type& val, Func func ) + { + check_deadlock_policy::check(); + + unique_internal_node_ptr pNewInternal; + retired_list updRetire; + + { + rcu_lock l; + + search_result res; + for ( ;; ) { + if ( search( res, val, node_compare() )) { + cds::unref(func)( false, *node_traits::to_value_ptr( res.pLeaf ), val ); + if ( pNewInternal.get() ) + m_Stat.onInternalNodeDeleted() ; // unique_internal_node_ptr deletes internal node + m_Stat.onEnsureExist(); + return std::make_pair( true, false ); + } + + if ( res.updParent.bits() != update_desc::Clean ) + help( res.updParent, updRetire ); + else { + if ( !pNewInternal.get() ) + pNewInternal.reset( alloc_internal_node() ); + + if ( try_insert( val, pNewInternal.get(), res, updRetire )) { + cds::unref(func)( true, val, val ); + pNewInternal.release() ; // internal node is linked into the tree and should not be deleted + break; + } + } + m_Stat.onEnsureRetry(); + } + } + + ++m_ItemCounter; + m_Stat.onEnsureNew(); + + return std::make_pair( true, true ); + } + + /// Unlinks the item \p val from the tree + /** + The function searches the item \p val in the tree and unlink it from the tree + if it is found and is equal to \p val. + + Difference between \ref erase and \p unlink functions: \p erase finds a key + and deletes the item found. \p unlink finds an item by key and deletes it + only if \p val is an item of the tree, i.e. the pointer to item found + is equal to &val . + + RCU \p synchronize method can be called. RCU should not be locked. + + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { +# if defined(CDS_CXX11_LAMBDA_SUPPORT) && !(CDS_COMPILER == CDS_COMPILER_MSVC && CDS_COMPILER_VERSION == CDS_COMPILER_MSVC10) + // vc10 generates an error for the lambda - it sees cds::intrusive::node_traits but not class-defined node_traits + return erase_( val, node_compare(), + []( value_type const& v, leaf_node const& n ) -> bool { return &v == node_traits::to_value_ptr( n ); }, + [](value_type const&) {} ); +# else + return erase_( val, node_compare(), unlink_equal_functor(), empty_erase_functor() ); +# endif + } + + /// Deletes the item from the tree + /** \anchor cds_intrusive_EllenBinTree_rcu_erase + The function searches an item with key equal to \p val in the tree, + unlinks it from the tree, and returns \p true. + If the item with key equal to \p val is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool erase( const Q& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_( val, node_compare(), + []( Q const&, leaf_node const& ) -> bool { return true; }, + [](value_type const&) {} ); +# else + return erase_( val, node_compare(), trivial_equal_functor(), empty_erase_functor() ); +# endif + } + + /// Delete the item from the tree with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_rcu_erase "erase(Q const&)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. 
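+            A minimal usage sketch (\p my_less, \p theTree and the key value are illustrative assumptions;
+            \p my_less must order elements in the same way as the comparator used for building the tree):
+            \code
+            struct my_less {
+                bool operator()( value_type const& v, int nKey ) const;
+                bool operator()( int nKey, value_type const& v ) const;
+            };
+
+            theTree.erase_with( 10, my_less() );
+            \endcode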
+ */ + template + bool erase_with( const Q& val, Less pred ) + { + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_( val, compare_functor(), + []( Q const&, leaf_node const& ) -> bool { return true; }, + [](value_type const&) {} ); +# else + return erase_( val, compare_functor(), trivial_equal_functor(), empty_erase_functor() ); +# endif + } + + /// Deletes the item from the tree + /** \anchor cds_intrusive_EllenBinTree_rcu_erase_func + The function searches an item with key equal to \p val in the tree, + call \p f functor with item found, unlinks it from the tree, and returns \p true. + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. + + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + The functor can be passed by reference with boost:ref + + If the item with key equal to \p val is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool erase( Q const& val, Func f ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_( val, node_compare(), + []( Q const&, leaf_node const& ) -> bool { return true; }, + f ); +# else + return erase_( val, node_compare(), trivial_equal_functor(), f ); +# endif + } + + /// Delete the item from the tree with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_rcu_erase_func "erase(Q const&, Func)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. + */ + template + bool erase_with( Q const& val, Less pred, Func f ) + { + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_( val, compare_functor(), + []( Q const&, leaf_node const& ) -> bool { return true; }, + f ); +# else + return erase_( val, compare_functor(), trivial_equal_functor(), f ); +# endif + } + + /// Extracts an item with minimal key from the tree + /** + The function searches an item with minimal key, unlinks it, and returns pointer to an item found in \p result parameter. + If the tree is empty the function returns \p false. + + @note Due the concurrent nature of the tree, the function extracts nearly minimum key. + It means that the function gets leftmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of tree traversing. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when \p result object is destroyed or when + result.release() is called, see cds::urcu::exempt_ptr for explanation. + @note Before reusing \p result object you should call its \p release() method. 
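+            A minimal usage sketch draining the tree in nearly ascending key order
+            (\p theTree is an illustrative assumption):
+            \code
+            exempt_ptr ep;
+            while ( theTree.extract_min( ep )) {
+                // ep refers to the extracted item; it is not disposed while ep holds it
+                ep.release();   // release ep before reusing it; the disposer is invoked here
+            }
+            \endcode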
+ */ + bool extract_min(exempt_ptr& result) + { + return extract_min_(result); + } + + /// Extracts an item with maximal key from the tree + /** + The function searches an item with maximal key, unlinks it, and returns pointer to an item found in \p result prameter. + If the tree is empty the function returns \p false. + + @note Due the concurrent nature of the tree, the function extracts nearly maximal key. + It means that the function gets rightmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key great than rightmost item's key. + So, the function returns the item with maximum key at the moment of tree traversing. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when \p result object is destroyed or when + result.release() is called, see cds::urcu::exempt_ptr for explanation. + @note Before reusing \p result object you should call its \p release() method. + */ + bool extract_max(exempt_ptr& result) + { + return extract_max_(result); + } + + /// Extracts an item from the tree + /** \anchor cds_intrusive_EllenBinTree_rcu_extract + The function searches an item with key equal to \p key in the tree, + unlinks it, and returns pointer to an item found in \p result parameter. + If the item with the key equal to \p key is not found the function returns \p false. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when \p result object is destroyed or when + result.release() is called, see cds::urcu::exempt_ptr for explanation. + @note Before reusing \p result object you should call its \p release() method. + */ + template + bool extract( exempt_ptr& result, Q const& key ) + { + return extract_( result, key, node_compare() ); + } + + /// Extracts an item from the set using \p pred for searching + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_rcu_extract "extract(exempt_ptr&, Q const&)" + but \p pred is used for key compare. + \p Less has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less + "predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. + */ + template + bool extract_with( exempt_ptr& dest, Q const& val, Less pred ) + { + return extract_with_( dest, val, pred ); + } + + /// Finds the key \p val + /** @anchor cds_intrusive_EllenBinTree_rcu_find_val + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function applies RCU lock internally. + */ + template + bool find( Q const& val ) const + { + rcu_lock l; + search_result res; + if ( search( res, val, node_compare() )) { + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + + /// Finds the key \p val with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_rcu_find_val "find(Q const&)" + but \p pred is used for key compare. + \p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less + "Predicate requirements". 
+ \p pred must imply the same element order as the comparator used for building the tree. + \p pred should accept arguments of type \p Q, \p key_type, \p value_type in any combination. + */ + template + bool find_with( Q const& val, Less pred ) const + { + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + + rcu_lock l; + search_result res; + if ( search( res, val, compare_functor() )) { + m_Stat.onFindSuccess(); + return true; + } + m_Stat.onFindFailed(); + return false; + } + + /// Finds the key \p val + /** @anchor cds_intrusive_EllenBinTree_rcu_find_func + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the tree \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + The function applies RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) const + { + return find_( val, f ); + } + + /// Finds the key \p val with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_rcu_find_func "find(Q&, Func)" + but \p pred is used for key comparison. + \p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. + */ + template + bool find_with( Q& val, Less pred, Func f ) const + { + return find_with_( val, pred, f ); + } + + /// Finds the key \p val + /** @anchor cds_intrusive_EllenBinTree_rcu_find_cfunc + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the tree \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The function applies RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) const + { + return find_( val, f ); + } + + /// Finds the key \p val with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_rcu_find_cfunc "find(Q const&, Func)" + but \p pred is used for key compare. 
+ \p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. + */ + template + bool find_with( Q const& val, Less pred, Func f ) const + { + return find_with_( val, pred, f ); + } + + /// Finds \p key and return the item found + /** \anchor cds_intrusive_EllenBinTree_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns \p NULL. + + RCU should be locked before call the function. + Returned pointer is valid while RCU is locked. + */ + template + value_type * get( Q const& key ) const + { + return get_( key, node_compare() ); + } + + /// Finds \p key with \p pred predicate and return the item found + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the tree. + */ + template + value_type * get_with( Q const& key, Less pred ) const + { + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + + return get_( key, compare_functor()); + } + + /// Checks if the tree is empty + bool empty() const + { + return m_Root.m_pLeft.load( memory_model::memory_order_relaxed )->is_leaf(); + } + + /// Clears the tree (thread safe, non-atomic) + /** + The function unlink all items from the tree. + The function is thread safe but not atomic: in multi-threaded environment with parallel insertions + this sequence + \code + set.clear(); + assert( set.empty() ); + \endcode + the assertion could be raised. + + For each leaf the \ref disposer will be called after unlinking. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + void clear() + { + exempt_ptr ep; + while ( extract_min(ep) ) + ep.release(); + } + + /// Clears the tree (not thread safe) + /** + This function is not thread safe and may be called only when no other thread deals with the tree. + The function is used in the tree destructor. + */ + void unsafe_clear() + { + rcu_lock l; + + while ( true ) { + internal_node * pParent = null_ptr< internal_node *>(); + internal_node * pGrandParent = null_ptr(); + tree_node * pLeaf = const_cast( &m_Root ); + + // Get leftmost leaf + while ( pLeaf->is_internal() ) { + pGrandParent = pParent; + pParent = static_cast( pLeaf ); + pLeaf = pParent->m_pLeft.load( memory_model::memory_order_relaxed ); + } + + if ( pLeaf->infinite_key()) { + // The tree is empty + return; + } + + // Remove leftmost leaf and its parent node + assert( pGrandParent ); + assert( pParent ); + assert( pLeaf->is_leaf() ); + + pGrandParent->m_pLeft.store( pParent->m_pRight.load( memory_model::memory_order_relaxed ), memory_model::memory_order_relaxed ); + free_leaf_node( node_traits::to_value_ptr( static_cast( pLeaf ) ) ); + free_internal_node( pParent ); + } + } + + /// Returns item count in the tree + /** + Only leaf nodes containing user data are counted. + + The value returned depends on item counter type provided by \p Traits template parameter. + If it is atomicity::empty_item_counter this function always returns 0. 
+ Therefore, the function is not suitable for checking the tree emptiness, use \ref empty + member function for this purpose. + */ + size_t size() const + { + return m_ItemCounter; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + /// Checks internal consistency (not atomic, not thread-safe) + /** + The debugging function to check internal consistency of the tree. + */ + bool check_consistency() const + { + return check_consistency( &m_Root ); + } + + protected: + //@cond + + bool check_consistency( internal_node const * pRoot ) const + { + tree_node * pLeft = pRoot->m_pLeft.load( CDS_ATOMIC::memory_order_relaxed ); + tree_node * pRight = pRoot->m_pRight.load( CDS_ATOMIC::memory_order_relaxed ); + assert( pLeft ); + assert( pRight ); + + if ( node_compare()( *pLeft, *pRoot ) < 0 + && node_compare()( *pRoot, *pRight ) <= 0 + && node_compare()( *pLeft, *pRight ) < 0 ) + { + bool bRet = true; + if ( pLeft->is_internal() ) + bRet = check_consistency( static_cast( pLeft ) ); + assert( bRet ); + + if ( bRet && pRight->is_internal() ) + bRet = bRet && check_consistency( static_cast( pRight )); + assert( bRet ); + + return bRet; + } + return false; + } + + void help( update_ptr pUpdate, retired_list& rl ) + { + /* + switch ( pUpdate.bits() ) { + case update_desc::IFlag: + help_insert( pUpdate.ptr() ); + m_Stat.onHelpInsert(); + break; + case update_desc::DFlag: + //help_delete( pUpdate.ptr(), rl ); + //m_Stat.onHelpDelete(); + break; + case update_desc::Mark: + //help_marked( pUpdate.ptr() ); + //m_Stat.onHelpMark(); + break; + } + */ + } + + void help_insert( update_desc * pOp ) + { + assert( gc::is_locked() ); + + tree_node * pLeaf = static_cast( pOp->iInfo.pLeaf ); + if ( pOp->iInfo.bRightLeaf ) { + pOp->iInfo.pParent->m_pRight.compare_exchange_strong( pLeaf, static_cast( pOp->iInfo.pNew ), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + } + else { + pOp->iInfo.pParent->m_pLeft.compare_exchange_strong( pLeaf, static_cast( pOp->iInfo.pNew ), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + } + + update_ptr cur( pOp, update_desc::IFlag ); + pOp->iInfo.pParent->m_pUpdate.compare_exchange_strong( cur, pOp->iInfo.pParent->null_update_desc(), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + } + + bool check_delete_precondition( search_result& res ) + { + assert( res.pGrandParent != null_ptr() ); + + return + static_cast( res.bRightParent + ? res.pGrandParent->m_pRight.load(memory_model::memory_order_relaxed) + : res.pGrandParent->m_pLeft.load(memory_model::memory_order_relaxed) + ) == res.pParent + && + static_cast( res.bRightLeaf + ? 
res.pParent->m_pRight.load(memory_model::memory_order_relaxed) + : res.pParent->m_pLeft.load(memory_model::memory_order_relaxed) + ) == res.pLeaf; + } + + bool help_delete( update_desc * pOp, retired_list& rl ) + { + assert( gc::is_locked() ); + + update_ptr pUpdate( pOp->dInfo.pUpdateParent ); + update_ptr pMark( pOp, update_desc::Mark ); + if ( pOp->dInfo.pParent->m_pUpdate.compare_exchange_strong( pUpdate, pMark, + memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + { + help_marked( pOp ); + retire_node( pOp->dInfo.pParent, rl ); + // For extract operations the leaf should NOT be disposed + if ( pOp->dInfo.bDisposeLeaf ) + retire_node( pOp->dInfo.pLeaf, rl ); + retire_update_desc( pOp, rl, false ); + + return true; + } + else if ( pUpdate == pMark ) { + // some other thread is processing help_marked() + help_marked( pOp ); + m_Stat.onHelpMark(); + return true; + } + else { + // pUpdate has been changed by CAS + help( pUpdate, rl ); + + // Undo grandparent dInfo + update_ptr pDel( pOp, update_desc::DFlag ); + if ( pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( pDel, pOp->dInfo.pGrandParent->null_update_desc(), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + { + retire_update_desc( pOp, rl, false ); + } + return false; + } + } + + void help_marked( update_desc * pOp ) + { + assert( gc::is_locked() ); + + tree_node * p = pOp->dInfo.pParent; + if ( pOp->dInfo.bRightParent ) { + pOp->dInfo.pGrandParent->m_pRight.compare_exchange_strong( p, + pOp->dInfo.bRightLeaf + ? pOp->dInfo.pParent->m_pLeft.load( memory_model::memory_order_acquire ) + : pOp->dInfo.pParent->m_pRight.load( memory_model::memory_order_acquire ), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + } + else { + pOp->dInfo.pGrandParent->m_pLeft.compare_exchange_strong( p, + pOp->dInfo.bRightLeaf + ? pOp->dInfo.pParent->m_pLeft.load( memory_model::memory_order_acquire ) + : pOp->dInfo.pParent->m_pRight.load( memory_model::memory_order_acquire ), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + } + + update_ptr upd( pOp, update_desc::DFlag ); + pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( upd, pOp->dInfo.pGrandParent->null_update_desc(), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + } + + template + bool search( search_result& res, KeyValue const& key, Compare cmp ) const + { + assert( gc::is_locked() ); + + internal_node * pParent; + internal_node * pGrandParent = null_ptr(); + tree_node * pLeaf; + update_ptr updParent; + update_ptr updGrandParent; + bool bRightLeaf; + bool bRightParent = false; + + int nCmp = 0; + + retry: + pParent = null_ptr(); + pLeaf = const_cast( &m_Root ); + updParent = null_ptr(); + bRightLeaf = false; + while ( pLeaf->is_internal() ) { + pGrandParent = pParent; + pParent = static_cast( pLeaf ); + bRightParent = bRightLeaf; + updGrandParent = updParent; + updParent = pParent->m_pUpdate.load( memory_model::memory_order_acquire ); + + switch ( updParent.bits() ) { + case update_desc::DFlag: + case update_desc::Mark: + m_Stat.onSearchRetry(); + goto retry; + } + + nCmp = cmp( key, *pParent ); + bRightLeaf = nCmp >= 0; + pLeaf = nCmp < 0 ? 
pParent->m_pLeft.load( memory_model::memory_order_acquire ) + : pParent->m_pRight.load( memory_model::memory_order_acquire ); + } + + assert( pLeaf->is_leaf() ); + nCmp = cmp( key, *static_cast(pLeaf) ); + + res.pGrandParent = pGrandParent; + res.pParent = pParent; + res.pLeaf = static_cast( pLeaf ); + res.updParent = updParent; + res.updGrandParent = updGrandParent; + res.bRightParent = bRightParent; + res.bRightLeaf = bRightLeaf; + + return nCmp == 0; + } + + bool search_min( search_result& res ) const + { + assert( gc::is_locked() ); + + internal_node * pParent; + internal_node * pGrandParent = null_ptr(); + tree_node * pLeaf; + update_ptr updParent; + update_ptr updGrandParent; + + retry: + pParent = null_ptr< internal_node *>(); + pLeaf = const_cast( &m_Root ); + while ( pLeaf->is_internal() ) { + pGrandParent = pParent; + pParent = static_cast( pLeaf ); + updGrandParent = updParent; + updParent = pParent->m_pUpdate.load( memory_model::memory_order_acquire ); + + switch ( updParent.bits() ) { + case update_desc::DFlag: + case update_desc::Mark: + m_Stat.onSearchRetry(); + goto retry; + } + + pLeaf = pParent->m_pLeft.load( memory_model::memory_order_acquire ); + } + + if ( pLeaf->infinite_key()) + return false; + + res.pGrandParent = pGrandParent; + res.pParent = pParent; + assert( pLeaf->is_leaf() ); + res.pLeaf = static_cast( pLeaf ); + res.updParent = updParent; + res.updGrandParent = updGrandParent; + res.bRightParent = false; + res.bRightLeaf = false; + + return true; + } + + bool search_max( search_result& res ) const + { + assert( gc::is_locked() ); + + internal_node * pParent; + internal_node * pGrandParent = null_ptr(); + tree_node * pLeaf; + update_ptr updParent; + update_ptr updGrandParent; + bool bRightLeaf; + bool bRightParent = false; + + retry: + pParent = null_ptr< internal_node *>(); + pLeaf = const_cast( &m_Root ); + bRightLeaf = false; + while ( pLeaf->is_internal() ) { + pGrandParent = pParent; + pParent = static_cast( pLeaf ); + bRightParent = bRightLeaf; + updGrandParent = updParent; + updParent = pParent->m_pUpdate.load( memory_model::memory_order_acquire ); + + switch ( updParent.bits() ) { + case update_desc::DFlag: + case update_desc::Mark: + m_Stat.onSearchRetry(); + goto retry; + } + + if ( pParent->infinite_key()) { + pLeaf = pParent->m_pLeft.load( memory_model::memory_order_acquire ); + bRightLeaf = false; + } + else { + pLeaf = pParent->m_pRight.load( memory_model::memory_order_acquire ); + bRightLeaf = true; + } + } + + if ( pLeaf->infinite_key()) + return false; + + res.pGrandParent = pGrandParent; + res.pParent = pParent; + assert( pLeaf->is_leaf() ); + res.pLeaf = static_cast( pLeaf ); + res.updParent = updParent; + res.updGrandParent = updGrandParent; + res.bRightParent = bRightParent; + res.bRightLeaf = bRightLeaf; + + return true; + } + + template + bool erase_( Q const& val, Compare cmp, Equal eq, Func f ) + { + check_deadlock_policy::check(); + + retired_list updRetire; + update_desc * pOp = null_ptr(); + search_result res; + + { + rcu_lock l; + for ( ;; ) { + if ( !search( res, val, cmp ) || !eq( val, *res.pLeaf ) ) { + if ( pOp ) + retire_update_desc( pOp, updRetire, false ); + m_Stat.onEraseFailed(); + return false; + } + + if ( res.updGrandParent.bits() != update_desc::Clean ) + help( res.updGrandParent, updRetire ); + else if ( res.updParent.bits() != update_desc::Clean ) + help( res.updParent, updRetire ); + else { + if ( !pOp ) + pOp = alloc_update_desc(); + if ( check_delete_precondition( res ) ) { + pOp->dInfo.pGrandParent = 
res.pGrandParent; + pOp->dInfo.pParent = res.pParent; + pOp->dInfo.pLeaf = res.pLeaf; + pOp->dInfo.bDisposeLeaf = true; + pOp->dInfo.pUpdateParent = res.updParent.ptr(); + pOp->dInfo.bRightParent = res.bRightParent; + pOp->dInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updGP( res.updGrandParent.ptr() ); + if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), + memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + { + if ( help_delete( pOp, updRetire )) { + // res.pLeaf is not deleted yet since RCU is blocked + cds::unref(f)( *node_traits::to_value_ptr( res.pLeaf )); + break; + } + pOp = null_ptr(); + } + else { + // updGP has been changed by CAS + help( updGP, updRetire ); + } + } + } + + m_Stat.onEraseRetry(); + } + } + + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return true; + } + + template + bool extract_with_( ExemptPtr& dest, Q const& val, Less pred ) + { + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + + return extract_( dest, val, compare_functor() ); + } + + template + bool extract_( ExemptPtr& ptr, Q const& val, Compare cmp ) + { + check_deadlock_policy::check(); + + retired_list updRetire; + update_desc * pOp = null_ptr(); + search_result res; + + { + rcu_lock l; + for ( ;; ) { + if ( !search( res, val, cmp ) ) { + if ( pOp ) + retire_update_desc( pOp, updRetire, false ); + m_Stat.onEraseFailed(); + return false; + } + + if ( res.updGrandParent.bits() != update_desc::Clean ) + help( res.updGrandParent, updRetire ); + else if ( res.updParent.bits() != update_desc::Clean ) + help( res.updParent, updRetire ); + else { + if ( !pOp ) + pOp = alloc_update_desc(); + if ( check_delete_precondition( res )) { + pOp->dInfo.pGrandParent = res.pGrandParent; + pOp->dInfo.pParent = res.pParent; + pOp->dInfo.pLeaf = res.pLeaf; + pOp->dInfo.bDisposeLeaf = false; + pOp->dInfo.pUpdateParent = res.updParent.ptr(); + pOp->dInfo.bRightParent = res.bRightParent; + pOp->dInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updGP( res.updGrandParent.ptr() ); + if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), + memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + { + if ( help_delete( pOp, updRetire )) { + ptr = node_traits::to_value_ptr( res.pLeaf ); + break; + } + pOp = null_ptr(); + } + else { + // updGP has been changed by CAS + help( updGP, updRetire ); + } + } + } + + m_Stat.onEraseRetry(); + } + } + + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return true; + } + + + template + bool extract_max_( ExemptPtr& result ) + { + check_deadlock_policy::check(); + + retired_list updRetire; + update_desc * pOp = null_ptr(); + search_result res; + + { + rcu_lock l; + for ( ;; ) { + if ( !search_max( res )) { + // Tree is empty + if ( pOp ) + retire_update_desc( pOp, updRetire, false ); + m_Stat.onExtractMaxFailed(); + return false; + } + + if ( res.updGrandParent.bits() != update_desc::Clean ) + help( res.updGrandParent, updRetire ); + else if ( res.updParent.bits() != update_desc::Clean ) + help( res.updParent, updRetire ); + else { + if ( !pOp ) + pOp = alloc_update_desc(); + if ( check_delete_precondition( res ) ) { + pOp->dInfo.pGrandParent = res.pGrandParent; + pOp->dInfo.pParent = res.pParent; + pOp->dInfo.pLeaf = res.pLeaf; + pOp->dInfo.bDisposeLeaf = false; + pOp->dInfo.pUpdateParent = res.updParent.ptr(); + pOp->dInfo.bRightParent = res.bRightParent; + 
pOp->dInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updGP( res.updGrandParent.ptr() ); + if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), + memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + { + if ( help_delete( pOp, updRetire )) { + result = node_traits::to_value_ptr( res.pLeaf ); + break; + } + pOp = null_ptr(); + } + else { + // updGP has been changed by CAS + help( updGP, updRetire ); + } + } + } + m_Stat.onExtractMaxRetry(); + } + } + + --m_ItemCounter; + m_Stat.onExtractMaxSuccess(); + return true; + } + + template + bool extract_min_(ExemptPtr& result) + { + check_deadlock_policy::check(); + + retired_list updRetire; + update_desc * pOp = null_ptr(); + search_result res; + + { + rcu_lock l; + for ( ;; ) { + if ( !search_min( res )) { + // Tree is empty + if ( pOp ) + retire_update_desc( pOp, updRetire, false ); + m_Stat.onExtractMinFailed(); + return false; + } + + if ( res.updGrandParent.bits() != update_desc::Clean ) + help( res.updGrandParent, updRetire ); + else if ( res.updParent.bits() != update_desc::Clean ) + help( res.updParent, updRetire ); + else { + if ( !pOp ) + pOp = alloc_update_desc(); + if ( check_delete_precondition( res ) ) { + pOp->dInfo.pGrandParent = res.pGrandParent; + pOp->dInfo.pParent = res.pParent; + pOp->dInfo.pLeaf = res.pLeaf; + pOp->dInfo.bDisposeLeaf = false; + pOp->dInfo.pUpdateParent = res.updParent.ptr(); + pOp->dInfo.bRightParent = res.bRightParent; + pOp->dInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updGP( res.updGrandParent.ptr() ); + if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), + memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + { + if ( help_delete( pOp, updRetire )) { + result = node_traits::to_value_ptr( res.pLeaf ); + break; + } + pOp = null_ptr(); + } + else { + // updGP has been changed by CAS + help( updGP, updRetire ); + } + } + } + + m_Stat.onExtractMinRetry(); + } + } + + --m_ItemCounter; + m_Stat.onExtractMinSuccess(); + return true; + } + + template + bool find_with_( Q& val, Less pred, Func f ) const + { + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + + rcu_lock l; + search_result res; + if ( search( res, val, compare_functor() )) { + assert( res.pLeaf ); + cds::unref(f)( *node_traits::to_value_ptr( res.pLeaf ), val ); + + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + + template + bool find_( Q& key, Func f ) const + { + rcu_lock l; + search_result res; + if ( search( res, key, node_compare() )) { + assert( res.pLeaf ); + cds::unref(f)( *node_traits::to_value_ptr( res.pLeaf ), key ); + + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + + template + value_type * get_( Q const& key, Compare cmp ) const + { + assert( gc::is_locked()); + + search_result res; + if ( search( res, key, cmp )) { + m_Stat.onFindSuccess(); + return node_traits::to_value_ptr( res.pLeaf ); + } + + m_Stat.onFindFailed(); + return null_ptr(); + } + + + bool try_insert( value_type& val, internal_node * pNewInternal, search_result& res, retired_list& updRetire ) + { + assert( gc::is_locked() ); + assert( res.updParent.bits() == update_desc::Clean ); + + // check search result + if ( static_cast( res.bRightLeaf + ? 
res.pParent->m_pRight.load( memory_model::memory_order_relaxed ) + : res.pParent->m_pLeft.load( memory_model::memory_order_relaxed ) ) == res.pLeaf ) + { + leaf_node * pNewLeaf = node_traits::to_node_ptr( val ); + + int nCmp = node_compare()( val, *res.pLeaf ); + if ( nCmp < 0 ) { + if ( res.pGrandParent ) { + pNewInternal->infinite_key( 0 ); + key_extractor()( pNewInternal->m_Key, *node_traits::to_value_ptr( res.pLeaf ) ); + assert( !res.pLeaf->infinite_key() ); + } + else { + assert( res.pLeaf->infinite_key() == tree_node::key_infinite1 ); + pNewInternal->infinite_key( 1 ); + } + pNewInternal->m_pLeft.store( static_cast(pNewLeaf), memory_model::memory_order_relaxed ); + pNewInternal->m_pRight.store( static_cast(res.pLeaf), memory_model::memory_order_release ); + } + else { + assert( !res.pLeaf->is_internal() ); + pNewInternal->infinite_key( 0 ); + + key_extractor()( pNewInternal->m_Key, val ); + pNewInternal->m_pLeft.store( static_cast(res.pLeaf), memory_model::memory_order_relaxed ); + pNewInternal->m_pRight.store( static_cast(pNewLeaf), memory_model::memory_order_release ); + assert( !res.pLeaf->infinite_key()); + } + + update_desc * pOp = alloc_update_desc(); + + pOp->iInfo.pParent = res.pParent; + pOp->iInfo.pNew = pNewInternal; + pOp->iInfo.pLeaf = res.pLeaf; + pOp->iInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updCur( res.updParent.ptr() ); + if ( res.pParent->m_pUpdate.compare_exchange_strong( updCur, update_ptr( pOp, update_desc::IFlag ), + memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + { + // do insert + help_insert( pOp ); + retire_update_desc( pOp, updRetire, false ); + return true; + } + else { + // updCur has been updated by CAS + help( updCur, updRetire ); + retire_update_desc( pOp, updRetire, true ); + } + } + return false; + } + + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_ELLEN_BINTREE_RCU_H diff --git a/cds/intrusive/fcqueue.h b/cds/intrusive/fcqueue.h new file mode 100644 index 00000000..ac56e021 --- /dev/null +++ b/cds/intrusive/fcqueue.h @@ -0,0 +1,345 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_FCQUEUE_H +#define __CDS_INTRUSIVE_FCQUEUE_H + +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// FCQueue related definitions + namespace fcqueue { + + /// FCQueue internal statistics + template + struct stat: public cds::algo::flat_combining::stat + { + typedef cds::algo::flat_combining::stat flat_combining_stat; ///< Flat-combining statistics + typedef typename flat_combining_stat::counter_type counter_type; ///< Counter type + + counter_type m_nEnqueue ; ///< Count of push operations + counter_type m_nDequeue ; ///< Count of success pop operations + counter_type m_nFailedDeq ; ///< Count of failed pop operations (pop from empty queue) + counter_type m_nCollided ; ///< How many pairs of push/pop were collided, if elimination is enabled + + //@cond + void onEnqueue() { ++m_nEnqueue; } + void onDequeue( bool bFailed ) { if ( bFailed ) ++m_nFailedDeq; else ++m_nDequeue; } + void onCollide() { ++m_nCollided; } + //@endcond + }; + + /// FCQueue dummy statistics, no overhead + struct empty_stat: public cds::algo::flat_combining::empty_stat + { + //@cond + void onEnqueue() {} + void onDequeue(bool) {} + void onCollide() {} + //@endcond + }; + + /// FCQueue type traits + struct type_traits: public cds::algo::flat_combining::type_traits + { + typedef cds::intrusive::opt::v::empty_disposer disposer ; ///< Disposer to erase removed elements. 
Used only in \p FCQueue::clear() function + typedef empty_stat stat; ///< Internal statistics + static CDS_CONSTEXPR_CONST bool enable_elimination = false; ///< Enable \ref cds_elimination_description "elimination" + }; + + /// Metafunction converting option list to traits + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + \p Options are: + - \p opt::lock_type - mutex type, default is \p cds::lock::Spin + - \p opt::back_off - back-off strategy, defalt is \p cds::backoff::Default + - \p opt::disposer - the functor used for dispose removed items. Default is opt::intrusive::v::empty_disposer. + This option is used only in \p FCQueue::clear() function. + - \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default) + - \p opt::memory_model - C++ memory ordering model. + List of all available memory ordering see opt::memory_model. + Default if cds::opt::v:relaxed_ordering + - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" + By default, the elimination is disabled. + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS8 >::type + ,CDS_OPTIONS8 + >::type type; +# endif + }; + } // namespace fcqueue + + /// Flat-combining intrusive queue + /** + @ingroup cds_intrusive_queue + @ingroup cds_flat_combining_intrusive + + \ref cds_flat_combining_description "Flat combining" sequential intrusive queue. + + Template parameters: + - \p T - a value type stored in the queue + - \p Container - sequential intrusive container with \p push_back and \p pop_front functions. + Default is \p boost::intrusive::list + - \p Traits - type traits of flat combining, default is \p fcqueue::type_traits. + \p fcqueue::make_traits metafunction can be used to construct specialized \p %type_traits + */ + template + ,typename Traits = fcqueue::type_traits + > + class FCQueue +#ifndef CDS_DOXYGEN_INVOKED + : public cds::algo::flat_combining::container +#endif + { + public: + typedef T value_type; ///< Value type + typedef Container container_type; ///< Sequential container type + typedef Traits type_traits; ///< Queue type traits + + typedef typename type_traits::disposer disposer; ///< The disposer functor. 
The disposer is used only in \ref clear() function + typedef typename type_traits::stat stat; ///< Internal statistics type + static CDS_CONSTEXPR_CONST bool c_bEliminationEnabled = type_traits::enable_elimination; ///< \p true if elimination is enabled + + protected: + //@cond + /// Queue operation IDs + enum fc_operation { + op_enq = cds::algo::flat_combining::req_Operation, ///< Enqueue + op_deq, ///< Dequeue + op_clear, ///< Clear + op_clear_and_dispose ///< Clear and dispose + }; + + /// Flat combining publication list record + struct fc_record: public cds::algo::flat_combining::publication_record + { + value_type * pVal; ///< Value to enqueue or dequeue + bool bEmpty; ///< \p true if the queue is empty + }; + //@endcond + + /// Flat combining kernel + typedef cds::algo::flat_combining::kernel< fc_record, type_traits > fc_kernel; + + protected: + //@cond + fc_kernel m_FlatCombining; + container_type m_Queue; + //@endcond + + public: + /// Initializes empty queue object + FCQueue() + {} + + /// Initializes empty queue object and gives flat combining parameters + FCQueue( + unsigned int nCompactFactor ///< Flat combining: publication list compacting factor + ,unsigned int nCombinePassCount ///< Flat combining: number of combining passes for combiner thread + ) + : m_FlatCombining( nCompactFactor, nCombinePassCount ) + {} + + /// Inserts a new element at the end of the queue + /** + The function always returns \p true. + */ + bool enqueue( value_type& val ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); + pRec->pVal = &val; + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_enq, pRec, *this ); + else + m_FlatCombining.combine( op_enq, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onEnqueue(); + return true; + } + + /// Inserts a new element at the end of the queue (a synonym for \ref enqueue) + bool push( value_type& val ) + { + return enqueue( val ); + } + + /// Removes the next element from the queue + /** + If the queue is empty the function returns \p nullptr + */ + value_type * dequeue() + { + fc_record * pRec = m_FlatCombining.acquire_record(); + pRec->pVal = null_ptr(); + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_deq, pRec, *this ); + else + m_FlatCombining.combine( op_deq, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + + m_FlatCombining.internal_statistics().onDequeue( pRec->bEmpty ); + return pRec->pVal; + } + + /// Removes the next element from the queue (a synonym for \ref dequeue) + value_type * pop() + { + return dequeue(); + } + + /// Clears the queue + /** + If \p bDispose is \p true, the disposer provided in \p Traits class' template parameter + will be called for each removed element. + */ + void clear( bool bDispose = false ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( bDispose ? op_clear_and_dispose : op_clear, pRec, *this ); + else + m_FlatCombining.combine( bDispose ? op_clear_and_dispose : op_clear, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + } + + /// Returns the number of elements in the queue. + /** + Note that size() == 0 is not mean that the queue is empty because + combining record can be in process. + To check emptiness use \ref empty function. 
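+            A minimal sketch of the recommended emptiness check (\p my_item and its
+            \p boost::intrusive hook are illustrative only, assuming the default
+            \p boost::intrusive::list container):
+            \code
+            struct my_item: public boost::intrusive::list_base_hook<>
+            { int nData; };
+
+            cds::intrusive::FCQueue< my_item > q;
+            // ...
+            if ( q.empty() ) {
+                // empty() waits until the current combining pass is finished,
+                // so the result accounts for pending enqueue/dequeue requests
+            }
+            \endcode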
+ */ + size_t size() const + { + return m_Queue.size(); + } + + /// Checks if the queue is empty + /** + If the combining is in process the function waits while it is done. + */ + bool empty() const + { + m_FlatCombining.wait_while_combining(); + return m_Queue.empty(); + } + + /// Internal statistics + stat const& statistics() const + { + return m_FlatCombining.statistics(); + } + + public: // flat combining cooperation, not for direct use! + //@cond + /// Flat combining supporting function. Do not call it directly! + /** + The function is called by \ref cds::algo::flat_combining::kernel "flat combining kernel" + object if the current thread becomes a combiner. Invocation of the function means that + the queue should perform an action recorded in \p pRec. + */ + void fc_apply( fc_record * pRec ) + { + assert( pRec ); + + switch ( pRec->op() ) { + case op_enq: + assert( pRec->pVal ); + m_Queue.push_back( *(pRec->pVal ) ); + break; + case op_deq: + pRec->bEmpty = m_Queue.empty(); + if ( !pRec->bEmpty ) { + pRec->pVal = &m_Queue.front(); + m_Queue.pop_front(); + } + break; + case op_clear: + m_Queue.clear(); + break; + case op_clear_and_dispose: + m_Queue.clear_and_dispose( disposer() ); + break; + default: + assert(false); + break; + } + } + + /// Batch-processing flat combining + void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd ) + { + typedef typename fc_kernel::iterator fc_iterator; + for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) { + switch ( it->op() ) { + case op_enq: + case op_deq: + if ( m_Queue.empty() ) { + if ( itPrev != itEnd && collide( *itPrev, *it )) + itPrev = itEnd; + else + itPrev = it; + } + break; + } + } + } + //@endcond + + private: + //@cond + bool collide( fc_record& rec1, fc_record& rec2 ) + { + assert( m_Queue.empty() ); + + switch ( rec1.op() ) { + case op_enq: + if ( rec2.op() == op_deq ) { + assert(rec1.pVal); + rec2.pVal = rec1.pVal; + rec2.bEmpty = false; + m_FlatCombining.operation_done( rec1 ); + m_FlatCombining.operation_done( rec2 ); + m_FlatCombining.internal_statistics().onCollide(); + return true; + } + break; + case op_deq: + if ( rec2.op() == op_enq ) { + assert(rec2.pVal); + rec1.pVal = rec2.pVal; + rec1.bEmpty = false; + m_FlatCombining.operation_done( rec1 ); + m_FlatCombining.operation_done( rec2 ); + m_FlatCombining.internal_statistics().onCollide(); + return true; + } + break; + } + return false; + } + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_FCQUEUE_H diff --git a/cds/intrusive/fcstack.h b/cds/intrusive/fcstack.h new file mode 100644 index 00000000..517b0208 --- /dev/null +++ b/cds/intrusive/fcstack.h @@ -0,0 +1,329 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_FCSTACK_H +#define __CDS_INTRUSIVE_FCSTACK_H + +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// FCStack related definitions + namespace fcstack { + + /// FCStack internal statistics + template + struct stat: public cds::algo::flat_combining::stat + { + typedef cds::algo::flat_combining::stat flat_combining_stat; ///< Flat-combining statistics + typedef typename flat_combining_stat::counter_type counter_type; ///< Counter type + + counter_type m_nPush ; ///< Count of push operations + counter_type m_nPop ; ///< Count of success pop operations + counter_type m_nFailedPop; ///< Count of failed pop operations (pop from empty stack) + counter_type m_nCollided ; ///< How many pairs of push/pop were collided, if elimination is enabled + + //@cond 
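+            // Event hooks called by FCStack to update the counters above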
+ void onPush() { ++m_nPush; } + void onPop( bool bFailed ) { if ( bFailed ) ++m_nFailedPop; else ++m_nPop; } + void onCollide() { ++m_nCollided; } + //@endcond + }; + + /// FCStack dummy statistics, no overhead + struct empty_stat: public cds::algo::flat_combining::empty_stat + { + //@cond + void onPush() {} + void onPop(bool) {} + void onCollide() {} + //@endcond + }; + + /// FCStack type traits + struct type_traits: public cds::algo::flat_combining::type_traits + { + typedef cds::intrusive::opt::v::empty_disposer disposer ; ///< Disposer to erase removed elements. Used only in \p FCStack::clear() function + typedef empty_stat stat; ///< Internal statistics + static CDS_CONSTEXPR_CONST bool enable_elimination = false; ///< Enable \ref cds_elimination_description "elimination" + }; + + /// Metafunction converting option list to traits + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + \p Options are: + - \p opt::lock_type - mutex type, default is \p cds::lock::Spin + - \p opt::back_off - back-off strategy, defalt is \p cds::backoff::Default + - \p opt::disposer - the functor used for dispose removed items. Default is opt::intrusive::v::empty_disposer. + This option is used only in \p FCStack::clear() function. + - \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default) + - \p opt::memory_model - C++ memory ordering model. + List of all available memory ordering see opt::memory_model. + Default if cds::opt::v:relaxed_ordering + - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" + By default, the elimination is disabled. + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS8 >::type + ,CDS_OPTIONS8 + >::type type; +# endif + }; + + } // namespace fcstack + + /// Flat-combining intrusive stack + /** + @ingroup cds_intrusive_stack + @ingroup cds_flat_combining_intrusive + + \ref cds_flat_combining_description "Flat combining" sequential intrusive stack. + + Template parameters: + - \p T - a value type stored in the stack + - \p Container - sequential intrusive container with \p push_front and \p pop_front functions. + Possible containers are \p boost::intrusive::slist (the default), \p boost::inrtrusive::list + - \p Traits - type traits of flat combining, default is \p fcstack::type_traits. + \p fcstack::make_traits metafunction can be used to construct specialized \p %type_traits + */ + template + ,typename Traits = fcstack::type_traits + > + class FCStack +#ifndef CDS_DOXYGEN_INVOKED + : public cds::algo::flat_combining::container +#endif + { + public: + typedef T value_type; ///< Value type + typedef Container container_type; ///< Sequential container type + typedef Traits type_traits; ///< Stack type traits + + typedef typename type_traits::disposer disposer; ///< The disposer functor. 
The disposer is used only in \ref clear() function + typedef typename type_traits::stat stat; ///< Internal statistics type + static CDS_CONSTEXPR_CONST bool c_bEliminationEnabled = type_traits::enable_elimination; ///< \p true if elimination is enabled + + protected: + //@cond + /// Stack operation IDs + enum fc_operation { + op_push = cds::algo::flat_combining::req_Operation, ///< Push + op_pop, ///< Pop + op_clear, ///< Clear + op_clear_and_dispose ///< Clear and dispose + }; + + /// Flat combining publication list record + struct fc_record: public cds::algo::flat_combining::publication_record + { + value_type * pVal; ///< Value to push or pop + bool bEmpty; ///< \p true if the stack is empty + }; + //@endcond + + /// Flat combining kernel + typedef cds::algo::flat_combining::kernel< fc_record, type_traits > fc_kernel; + + protected: + //@cond + fc_kernel m_FlatCombining; + container_type m_Stack; + //@endcond + + public: + /// Initializes empty stack object + FCStack() + {} + + /// Initializes empty stack object and gives flat combining parameters + FCStack( + unsigned int nCompactFactor ///< Flat combining: publication list compacting factor + ,unsigned int nCombinePassCount ///< Flat combining: number of combining passes for combiner thread + ) + : m_FlatCombining( nCompactFactor, nCombinePassCount ) + {} + + /// Inserts a new element at the top of stack + /** + The content of the new element initialized to a copy of \p val. + */ + bool push( value_type& val ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); + pRec->pVal = &val; + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_push, pRec, *this ); + else + m_FlatCombining.combine( op_push, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPush(); + return true; + } + + /// Removes the element on top of the stack + value_type * pop() + { + fc_record * pRec = m_FlatCombining.acquire_record(); + pRec->pVal = null_ptr(); + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_pop, pRec, *this ); + else + m_FlatCombining.combine( op_pop, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + + m_FlatCombining.internal_statistics().onPop( pRec->bEmpty ); + return pRec->pVal; + } + + /// Clears the stack + /** + If \p bDispose is \p true, the disposer provided in \p Traits class' template parameter + will be called for each removed element. + */ + void clear( bool bDispose = false ) + { + fc_record * pRec = m_FlatCombining.acquire_record(); + + if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( bDispose ? op_clear_and_dispose : op_clear, pRec, *this ); + else + m_FlatCombining.combine( bDispose ? op_clear_and_dispose : op_clear, pRec, *this ); + + assert( pRec->is_done() ); + m_FlatCombining.release_record( pRec ); + } + + /// Returns the number of elements in the stack. + /** + Note that size() == 0 is not mean that the stack is empty because + combining record can be in process. + To check emptiness use \ref empty function. + */ + size_t size() const + { + return m_Stack.size(); + } + + /// Checks if the stack is empty + /** + If the combining is in process the function waits while it is done. + */ + bool empty() const + { + m_FlatCombining.wait_while_combining(); + return m_Stack.empty(); + } + + /// Internal statistics + stat const& statistics() const + { + return m_FlatCombining.statistics(); + } + + + public: // flat combining cooperation, not for direct use! 
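+        /*
+            A minimal usage sketch of the public interface above
+            (my_item and its boost::intrusive hook are illustrative only):
+
+                struct my_item: public boost::intrusive::slist_base_hook<>
+                {
+                    int nData;
+                };
+
+                cds::intrusive::FCStack< my_item > stack;
+                my_item item;
+                stack.push( item );
+                my_item * p = stack.pop();  // &item here; null if the stack is empty
+        */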
+ //@cond + /// Flat combining supporting function. Do not call it directly! + /** + The function is called by \ref cds::algo::flat_combining::kernel "flat combining kernel" + object if the current thread becomes a combiner. Invocation of the function means that + the stack should perform an action recorded in \p pRec. + */ + void fc_apply( fc_record * pRec ) + { + assert( pRec ); + + switch ( pRec->op() ) { + case op_push: + assert( pRec->pVal ); + m_Stack.push_front( *(pRec->pVal ) ); + break; + case op_pop: + pRec->bEmpty = m_Stack.empty(); + if ( !pRec->bEmpty ) { + pRec->pVal = &m_Stack.front(); + m_Stack.pop_front(); + } + break; + case op_clear: + m_Stack.clear(); + break; + case op_clear_and_dispose: + m_Stack.clear_and_dispose( disposer() ); + break; + default: + assert(false); + break; + } + } + + /// Batch-processing flat combining + void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd ) + { + typedef typename fc_kernel::iterator fc_iterator; + for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) { + switch ( it->op() ) { + case op_push: + case op_pop: + if ( itPrev != itEnd && collide( *itPrev, *it )) + itPrev = itEnd; + else + itPrev = it; + break; + } + } + } + //@endcond + + private: + //@cond + bool collide( fc_record& rec1, fc_record& rec2 ) + { + switch ( rec1.op() ) { + case op_push: + if ( rec2.op() == op_pop ) { + assert(rec1.pVal); + rec2.pVal = rec1.pVal; + rec2.bEmpty = false; + m_FlatCombining.operation_done( rec1 ); + m_FlatCombining.operation_done( rec2 ); + m_FlatCombining.internal_statistics().onCollide(); + return true; + } + break; + case op_pop: + if ( rec2.op() == op_push ) { + assert(rec2.pVal); + rec1.pVal = rec2.pVal; + rec1.bEmpty = false; + m_FlatCombining.operation_done( rec1 ); + m_FlatCombining.operation_done( rec2 ); + m_FlatCombining.internal_statistics().onCollide(); + return true; + } + break; + } + return false; + } + //@endcond + + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_FCSTACK_H diff --git a/cds/intrusive/lazy_list_base.h b/cds/intrusive/lazy_list_base.h new file mode 100644 index 00000000..ce6ab5a3 --- /dev/null +++ b/cds/intrusive/lazy_list_base.h @@ -0,0 +1,320 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_LAZY_LIST_BASE_H +#define __CDS_INTRUSIVE_LAZY_LIST_BASE_H + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// LazyList ordered list related definitions + /** @ingroup cds_intrusive_helper + */ + namespace lazy_list { + /// Lazy list node + /** + Template parameters: + - GC - garbage collector + - Lock - lock type. Default is cds::lock::Spin + - Tag - a tag used to distinguish between different implementation. An incomplete type can be used as a tag. 
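+            For example, an item type based on this node for \p gc::HP with the default
+            spin-lock (the item and its field are illustrative only):
+            \code
+            struct my_item: public cds::intrusive::lazy_list::node< cds::gc::HP >
+            {
+                int nKey;   // user data
+            };
+            \endcode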
+ */ + template < + class GC + ,typename Lock = lock::Spin + ,typename Tag = opt::none + > + struct node + { + typedef GC gc ; ///< Garbage collector + typedef Lock lock_type ; ///< Lock type + typedef Tag tag ; ///< tag + + typedef cds::details::marked_ptr marked_ptr ; ///< marked pointer + typedef typename gc::template atomic_marked_ptr< marked_ptr> atomic_marked_ptr ; ///< atomic marked pointer specific for GC + + atomic_marked_ptr m_pNext ; ///< pointer to the next node in the list + logical deletion mark + mutable lock_type m_Lock ; ///< Node lock + + /// Checks if node is marked + bool is_marked() const + { + return m_pNext.load(CDS_ATOMIC::memory_order_relaxed).bits() != 0; + } + + /// Default ctor + node() + : m_pNext( null_ptr()) + {} + }; + + //@cond + template + class boundary_nodes + { + typedef NodeType node_type; + + node_type m_Head; + node_type m_Tail; + + public: + node_type * head() + { + return &m_Head; + } + node_type const * head() const + { + return &m_Head; + } + node_type * tail() + { + return &m_Tail; + } + node_type const * tail() const + { + return &m_Tail; + } + }; + //@endcond + + //@cond + template + struct node_cleaner { + void operator()( Node * p ) + { + typedef typename Node::marked_ptr marked_ptr; + p->m_pNext.store( marked_ptr(), MemoryModel::memory_order_release ); + } + }; + //@endcond + + //@cond + struct undefined_gc; + struct default_hook { + typedef undefined_gc gc; + typedef opt::none tag; + typedef lock::Spin lock_type; + }; + //@endcond + + //@cond + template < typename HookType, CDS_DECL_OPTIONS3> + struct hook + { + typedef typename opt::make_options< default_hook, CDS_OPTIONS3>::type options; + typedef typename options::gc gc; + typedef typename options::tag tag; + typedef typename options::lock_type lock_type; + typedef node node_type; + typedef HookType hook_type; + }; + //@endcond + + /// Base hook + /** + \p Options are: + - opt::gc - garbage collector used. + - opt::lock_type - lock type used for node locking. Default is lock::Spin + - opt::tag - tag + */ + template < CDS_DECL_OPTIONS3 > + struct base_hook: public hook< opt::base_hook_tag, CDS_OPTIONS3 > + {}; + + /// Member hook + /** + \p MemberOffset defines offset in bytes of \ref node member into your structure. + Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - opt::gc - garbage collector used. + - opt::lock_type - lock type used for node locking. Default is lock::Spin + - opt::tag - tag + */ + template < size_t MemberOffset, CDS_DECL_OPTIONS3 > + struct member_hook: public hook< opt::member_hook_tag, CDS_OPTIONS3 > + { + //@cond + static const size_t c_nMemberOffset = MemberOffset; + //@endcond + }; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - opt::gc - garbage collector used. + - opt::lock_type - lock type used for node locking. 
Default is lock::Spin + - opt::tag - tag + */ + template + struct traits_hook: public hook< opt::traits_hook_tag, CDS_OPTIONS3 > + { + //@cond + typedef NodeTraits node_traits; + //@endcond + }; + + /// Check link + template + struct link_checker + { + //@cond + typedef Node node_type; + //@endcond + + /// Checks if the link field of node \p pNode is NULL + /** + An asserting is generated if \p pNode link field is not NULL + */ + static void is_empty( node_type const * pNode ) + { + assert( pNode->m_pNext.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr()); + } + }; + + //@cond + template + struct link_checker_selector; + + template + struct link_checker_selector< GC, Node, opt::never_check_link > + { + typedef intrusive::opt::v::empty_link_checker type; + }; + + template + struct link_checker_selector< GC, Node, opt::debug_check_link > + { +# ifdef _DEBUG + typedef link_checker type; +# else + typedef intrusive::opt::v::empty_link_checker type; +# endif + }; + + template + struct link_checker_selector< GC, Node, opt::always_check_link > + { + typedef link_checker type; + }; + //@endcond + + /// Metafunction for selecting appropriate link checking policy + template < typename Node, opt::link_check_type LinkType > + struct get_link_checker + { + //@cond + typedef typename link_checker_selector< typename Node::gc, Node, LinkType>::type type; + //@endcond + }; + + /// Type traits for LazyList class + struct type_traits + { + /// Hook used + /** + Possible values are: lazy_list::base_hook, lazy_list::member_hook, lazy_list::traits_hook. + */ + typedef base_hook<> hook; + + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// specifies binary predicate used for key comparison. + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// back-off strategy used + /** + If the option is not specified, the cds::backoff::Default is used. + */ + typedef cds::backoff::Default back_off; + + /// Disposer + /** + the functor used for dispose removed items. Default is opt::v::empty_disposer. + */ + typedef opt::v::empty_disposer disposer; + + /// Item counter + /** + The type for item counting feature. + Default is no item counter (\ref atomicity::empty_item_counter) + */ + typedef atomicity::empty_item_counter item_counter; + + /// Link fields checking feature + /** + Default is \ref opt::debug_check_link + */ + static const opt::link_check_type link_checker = opt::debug_check_link; + + /// Allocator + /** + For intrusive lazy list an allocator is needed for dummy tail node allocation. + */ + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// C++ memory ordering model + /** + List of available memory ordering see opt::memory_model + */ + typedef opt::v::relaxed_ordering memory_model; + + /// RCU deadlock checking policy (only for \ref cds_intrusive_LazyList_rcu "RCU-based LazyList") + /** + List of available options see opt::rcu_check_deadlock + */ + typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; + + //@cond + // for internal use only!!! + typedef opt::none boundary_node_type; + //@endcond + }; + + /// Metafunction converting option list to traits + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + + See \ref LazyList, \ref type_traits, \ref cds::opt::make_options. 
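+            For example (\p my_compare is an illustrative comparison functor):
+            \code
+            typedef typename cds::intrusive::lazy_list::make_traits<
+                cds::intrusive::opt::hook< cds::intrusive::lazy_list::base_hook< cds::opt::gc< cds::gc::HP > > >
+                ,cds::intrusive::opt::compare< my_compare >
+            >::type my_list_traits;
+            \endcode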
+ + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS11 >::type + ,CDS_OPTIONS11 + >::type type; +# endif + }; + + } // namespace lazy_list + + //@cond + // Forward declaration + template < class GC, typename T, class Traits = lazy_list::type_traits > + class LazyList; + //@endcond + + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_LAZY_LIST_BASE_H diff --git a/cds/intrusive/lazy_list_hp.h b/cds/intrusive/lazy_list_hp.h new file mode 100644 index 00000000..b41d9e93 --- /dev/null +++ b/cds/intrusive/lazy_list_hp.h @@ -0,0 +1,9 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_LAZY_LIST_HP_H +#define __CDS_INTRUSIVE_LAZY_LIST_HP_H + +#include +#include + +#endif // #ifndef __CDS_INTRUSIVE_LAZY_LIST_HP_H diff --git a/cds/intrusive/lazy_list_hrc.h b/cds/intrusive/lazy_list_hrc.h new file mode 100644 index 00000000..dc0c8eb8 --- /dev/null +++ b/cds/intrusive/lazy_list_hrc.h @@ -0,0 +1,152 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_LAZY_LIST_HRC_H +#define __CDS_INTRUSIVE_LAZY_LIST_HRC_H + +#include +#include +#include + +namespace cds { namespace intrusive { namespace lazy_list { + //@cond + // Specialization for HRC GC + template + struct node< gc::HRC, Lock, Tag>: public gc::HRC::container_node + { + typedef gc::HRC gc ; ///< Garbage collector + typedef Lock lock_type ; ///< Lock type + typedef Tag tag ; ///< tag + + typedef cds::details::marked_ptr marked_ptr ; ///< marked pointer + typedef typename gc::template atomic_marked_ptr< marked_ptr> atomic_marked_ptr ; ///< atomic marked pointer specific for GC + + atomic_marked_ptr m_pNext ; ///< pointer to the next node in the list + logical deletion mark + mutable lock_type m_Lock ; ///< Node lock + + /// Checks if node is marked + bool is_marked() const + { + return m_pNext.load(CDS_ATOMIC::memory_order_relaxed).bits() != 0; + } + + node() + : m_pNext( null_ptr() ) + {} + + protected: + virtual void cleanUp( cds::gc::hrc::ThreadGC * pGC ) + { + assert( pGC != null_ptr() ); + typename gc::GuardArray<2> aGuards( *pGC ); + + while ( true ) { + marked_ptr pNextMarked( aGuards.protect( 0, m_pNext )); + node * pNext = pNextMarked.ptr(); + if ( pNext != null_ptr() && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) { + marked_ptr p = aGuards.protect( 1, pNext->m_pNext ); + m_pNext.compare_exchange_weak( pNextMarked, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ); + continue; + } + else { + break; + } + } + } + + virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent ) + { + if ( bConcurrent ) { + marked_ptr pNext( m_pNext.load(CDS_ATOMIC::memory_order_relaxed)); + do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ); + } + else { + m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed ); + } + } + }; + //@endcond + + //@cond + template + class boundary_nodes< gc::HRC, NodeType, Alloc > + { + typedef NodeType node_type; + typedef cds::details::Allocator< node_type, Alloc> cxx_allocator ; ///< allocator for the tail node + + struct boundary_disposer + { + void operator()( node_type * p ) + { + cxx_allocator().Delete( p ); + } + }; + + + node_type * m_pHead; + node_type * m_pTail; + + public: + boundary_nodes() + { + m_pHead = cxx_allocator().New(); + m_pTail = cxx_allocator().New(); + } + + 
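+            // The dummy head/tail nodes are not deleted directly: they are retired
+            // to the HRC garbage collector, which frees them via boundary_disposer
+            // once it is safe to do so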
~boundary_nodes() + { + cds::gc::HRC::template retire( m_pHead ); + cds::gc::HRC::template retire( m_pTail ); + } + + public: + node_type * head() + { + return m_pHead; + } + node_type const * head() const + { + return m_pHead; + } + node_type * tail() + { + return m_pTail; + } + node_type const * tail() const + { + return m_pTail; + } + }; + //@endcond + + //@cond + /* + template + struct node_cleaner< gc::HRC, Node, MemoryModel> { + void operator()( Node * p ) + { + typedef typename Node::marked_ptr marked_ptr; + p->m_pNext.store( marked_ptr(), MemoryModel::memory_order_release ); + //p->clean( MemoryModel::memory_order_release ); + } + }; + */ + //@endcond + + + //@cond + template + struct link_checker_selector< gc::HRC, NODE, opt::never_check_link > + { + typedef link_checker type; + }; + + template + struct link_checker_selector< gc::HRC, NODE, opt::debug_check_link > + { + typedef link_checker type; + }; + //@endcond + +}}} // namespace cds::intrusive::lazy_list + +#endif // #ifndef __CDS_INTRUSIVE_LAZY_LIST_HP_H diff --git a/cds/intrusive/lazy_list_impl.h b/cds/intrusive/lazy_list_impl.h new file mode 100644 index 00000000..e97d4ebd --- /dev/null +++ b/cds/intrusive/lazy_list_impl.h @@ -0,0 +1,1234 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_LAZY_LIST_IMPL_H +#define __CDS_INTRUSIVE_LAZY_LIST_IMPL_H + +#include +#include + + +namespace cds { namespace intrusive { + + /// Lazy ordered single-linked list + /** @ingroup cds_intrusive_list + \anchor cds_intrusive_LazyList_hp + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Source: + - [2005] Steve Heller, Maurice Herlihy, Victor Luchangco, Mark Moir, William N. Scherer III, and Nir Shavit + "A Lazy Concurrent List-Based Set Algorithm" + + The lazy list is based on an optimistic locking scheme for inserts and removes, + eliminating the need to use the equivalent of an atomically markable + reference. It also has a novel wait-free membership \p find operation + that does not need to perform cleanup operations and is more efficient. + + Template arguments: + - \p GC - Garbage collector used. Note the \p GC must be the same as the GC used for item type \p T (see lazy_list::node). + - \p T - type to be stored in the list. The type must be based on lazy_list::node (for lazy_list::base_hook) + or it must have a member of type lazy_list::node (for lazy_list::member_hook). + - \p Traits - type traits. See lazy_list::type_traits for explanation. + + It is possible to declare option-based list with cds::intrusive::lazy_list::make_traits metafunction istead of \p Traits template + argument. For example, the following traits-based declaration of gc::HP lazy list + \code + #include + // Declare item stored in your list + struct item: public cds::intrusive::lazy_list::node< cds::gc::HP > + { ... }; + + // Declare comparator for the item + struct my_compare { ... 
} + + // Declare type_traits + struct my_traits: public cds::intrusive::lazy_list::type_traits + { + typedef cds::intrusive::lazy_list::base_hook< cds::opt::gc< cds::gc::HP > > hook; + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::intrusive::LazyList< cds::gc::HP, item, my_traits > traits_based_list; + \endcode + + is equivalent for the following option-based list + \code + #include + + // item struct and my_compare are the same + + // Declare option-based list + typedef cds::intrusive::LazyList< cds::gc::HP, item, + typename cds::intrusive::lazy_list::make_traits< + cds::intrusive::opt::hook< cds::intrusive::lazy_list::base_hook< cds::opt::gc< cds::gc::HP > > > // hook option + ,cds::intrusive::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + Template argument list \p Options of cds::intrusive::lazy_list::make_traits metafunction are: + - opt::hook - hook used. Possible values are: lazy_list::base_hook, lazy_list::member_hook, lazy_list::traits_hook. + If the option is not specified, lazy_list::base_hook<> and gc::HP is used. + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::Default is used. + - opt::disposer - the functor used for dispose removed items. Default is opt::v::empty_disposer. Due the nature + of GC schema the disposer may be called asynchronously. + - opt::link_checker - the type of node's link fields checking. Default is \ref opt::debug_check_link + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that means no item counting. + - opt::allocator - an allocator needed for dummy head and tail nodes. Default is \ref CDS_DEFAULT_ALLOCATOR. + The option applies only to gc::HRC garbage collector. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + + \par Usage + There are different specializations of this template for each garbage collecting schema used. + You should select GC needed and include appropriate .h-file: + - for gc::HP: \code #include \endcode + - for gc::PTB: \code #include \endcode + - for gc::HRC: \code #include \endcode + - for gc::nogc: \code #include \endcode + - for \ref cds_urcu_type "RCU" - see \ref cds_intrusive_LazyList_rcu "LazyList RCU specialization" + + Then, you should incorporate lazy_list::node into your struct \p T and provide + appropriate lazy_list::type_traits::hook in your \p Traits template parameters. Usually, for \p Traits + a struct based on lazy_list::type_traits should be defined. + + Example for gc::PTB and base hook: + \code + // Include GC-related lazy list specialization + #include + + // Data stored in lazy list + struct my_data: public cds::intrusive::lazy_list::node< cds::gc::PTB > + { + // key field + std::string strKey; + + // other data + // ... 
+ }; + + // my_data comparing functor + struct compare { + int operator()( const my_data& d1, const my_data& d2 ) + { + return d1.strKey.compare( d2.strKey ); + } + + int operator()( const my_data& d, const std::string& s ) + { + return d.strKey.compare(s); + } + + int operator()( const std::string& s, const my_data& d ) + { + return s.compare( d.strKey ); + } + }; + + + // Declare type_traits + struct my_traits: public cds::intrusive::lazy_list::type_traits + { + typedef cds::intrusive::lazy_list::base_hook< cds::opt::gc< cds::gc::PTB > > hook; + typedef my_data_cmp compare; + }; + + // Declare list type + typedef cds::intrusive::LazyList< cds::gc::PTB, my_data, my_traits > traits_based_list; + \endcode + + Equivalent option-based code: + \code + // GC-related specialization + #include + + struct my_data { + // see above + }; + struct compare { + // see above + }; + + // Declare option-based list + typedef cds::intrusive::LazyList< cds::gc::PTB + ,my_data + , typename cds::intrusive::lazy_list::make_traits< + cds::intrusive::opt::hook< cds::intrusive::lazy_list::base_hook< cds::opt::gc< cds::gc::PTB > > > + ,cds::intrusive::opt::compare< my_data_cmp > + >::type + > option_based_list; + + \endcode + */ + template < + class GC + ,typename T +#ifdef CDS_DOXYGEN_INVOKED + ,class Traits = lazy_list::type_traits +#else + ,class Traits +#endif + > + class LazyList + { + public: + typedef T value_type ; ///< type of value stored in the list + typedef Traits options ; ///< Traits template parameter + + typedef typename options::hook hook ; ///< hook type + typedef typename hook::node_type node_type ; ///< node type + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter. 
+# else + typedef typename opt::details::make_comparator< value_type, options >::type key_comparator; +# endif + + typedef typename options::disposer disposer ; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + typedef typename lazy_list::get_link_checker< node_type, options::link_checker >::type link_checker ; ///< link checker + + typedef GC gc ; ///< Garbage collector + typedef typename options::back_off back_off ; ///< back-off strategy + typedef typename options::item_counter item_counter ; ///< Item counting policy used + typedef typename options::memory_model memory_model; ///< C++ memory ordering (see lazy_list::type_traits::memory_model) + + typedef cds::gc::guarded_ptr< gc, value_type > guarded_ptr; ///< Guarded pointer + + //@cond + // Rebind options (split-list support) + template + struct rebind_options { + typedef LazyList< + gc + , value_type + , typename cds::opt::make_options< options, CDS_OPTIONS8>::type + > type; + }; + //@endcond + + protected: + typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer + typedef node_type * auxiliary_head ; ///< Auxiliary head type (for split-list support) + + protected: + //@cond + typedef lazy_list::boundary_nodes< + gc + ,typename opt::select_default< typename options::boundary_node_type, node_type >::type + ,typename options::allocator + > boundary_nodes; + boundary_nodes m_Boundary ; ///< Head & tail dummy nodes + + node_type * head() + { + return m_Boundary.head(); + } + node_type const * head() const + { + return m_Boundary.head(); + } + node_type * tail() + { + return m_Boundary.tail(); + } + node_type const * tail() const + { + return m_Boundary.tail(); + } + //@endcond + + item_counter m_ItemCounter ; ///< Item counter + + //@cond + struct clean_disposer { + void operator()( value_type * p ) + { + lazy_list::node_cleaner()( node_traits::to_node_ptr( p ) ); + disposer()( p ); + } + }; + + /// Position pointer for item search + struct position { + node_type * pPred ; ///< Previous node + node_type * pCur ; ///< Current node + + typename gc::template GuardArray<2> guards ; ///< Guards array + + enum { + guard_prev_item, + guard_current_item + }; + + /// Locks nodes \p pPred and \p pCur + void lock() + { + pPred->m_Lock.lock(); + pCur->m_Lock.lock(); + } + + /// Unlocks nodes \p pPred and \p pCur + void unlock() + { + pCur->m_Lock.unlock(); + pPred->m_Lock.unlock(); + } + }; + + class auto_lock_position { + position& m_pos; + public: + auto_lock_position( position& pos ) + : m_pos(pos) + { + pos.lock(); + } + ~auto_lock_position() + { + m_pos.unlock(); + } + }; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_erase_functor { + void operator()( value_type const & item ) + {} + }; +# endif + //@endcond + + protected: + //@cond + void link_node( node_type * pNode, node_type * pPred, node_type * pCur ) + { + assert( pPred->m_pNext.load(memory_model::memory_order_relaxed).ptr() == pCur ); + + pNode->m_pNext.store( marked_node_ptr(pCur), memory_model::memory_order_release ); + pPred->m_pNext.store( marked_node_ptr(pNode), memory_model::memory_order_release ); + } + + void unlink_node( node_type * pPred, node_type * pCur, node_type * pHead ) + { + assert( pPred->m_pNext.load(memory_model::memory_order_relaxed).ptr() == pCur ); + + node_type * pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed).ptr(); + //pCur->m_pNext.store( marked_node_ptr( pNext, 1), memory_model::memory_order_release) ; // logically deleting + 
pCur->m_pNext.store( marked_node_ptr( pHead, 1 ), memory_model::memory_order_release ) ; // logical removal + back-link for search + pPred->m_pNext.store( marked_node_ptr( pNext ), memory_model::memory_order_release); // physically deleting + //pCur->m_pNext.store( marked_node_ptr( pHead, 1 ), memory_model::memory_order_release ) ; // back-link for search + } + + void retire_node( node_type * pNode ) + { + assert( pNode != null_ptr() ); + gc::template retire( node_traits::to_value_ptr( *pNode ) ); + } + //@endcond + + protected: + //@cond + template + class iterator_type + { + friend class LazyList; + + protected: + value_type * m_pNode; + typename gc::Guard m_Guard; + + void next() + { + assert( m_pNode != null_ptr() ); + + if ( m_pNode ) { + typename gc::Guard g; + node_type * pCur = node_traits::to_node_ptr( m_pNode ); + if ( pCur->m_pNext.load(memory_model::memory_order_relaxed).ptr() != null_ptr() ) { // if pCur is not tail node + node_type * pNext; + do { + pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed).ptr(); + g.assign( node_traits::to_value_ptr( pNext )); + } while ( pNext != pCur->m_pNext.load(memory_model::memory_order_relaxed).ptr() ); + + m_pNode = m_Guard.assign( g.template get() ); + } + } + } + + void skip_deleted() + { + if ( m_pNode != null_ptr() ) { + typename gc::Guard g; + node_type * pNode = node_traits::to_node_ptr( m_pNode ); + + // Dummy tail node could not be marked + while ( pNode->is_marked() ) { + node_type * p = pNode->m_pNext.load(memory_model::memory_order_relaxed).ptr(); + g.assign( node_traits::to_value_ptr( p )); + if ( p == pNode->m_pNext.load(memory_model::memory_order_relaxed).ptr() ) + pNode = p; + } + if ( pNode != node_traits::to_node_ptr( m_pNode ) ) + m_pNode = m_Guard.assign( g.template get() ); + } + } + + iterator_type( node_type * pNode ) + { + m_pNode = m_Guard.assign( node_traits::to_value_ptr( pNode )); + skip_deleted(); + } + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + : m_pNode(null_ptr()) + {} + + iterator_type( iterator_type const& src ) + { + if ( src.m_pNode ) { + m_pNode = m_Guard.assign( src.m_pNode ); + } + else + m_pNode = null_ptr(); + } + + value_ptr operator ->() const + { + return m_pNode; + } + + value_ref operator *() const + { + assert( m_pNode != null_ptr() ); + return *m_pNode; + } + + /// Pre-increment + iterator_type& operator ++() + { + next(); + skip_deleted(); + return *this; + } + + iterator_type& operator = (iterator_type const& src) + { + m_pNode = src.m_pNode; + m_Guard.assign( m_pNode ); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator_type const& i ) const + { + return m_pNode != i.m_pNode; + } + }; + //@endcond + + public: + /// Forward iterator + /** + The forward iterator for lazy list has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. + For some GC (gc::HP, gc::HRC), a guard is limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if a limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary since it contains GC's guard that is thread-private GC data. + - Iterator ensures thread-safety even if you delete the item that iterator points to. 
However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the list. + + Therefore, the use of iterators in concurrent environment is not good idea. Use the iterator on the concurrent container + for debug purpose only. + */ + typedef iterator_type iterator; + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + iterator it( head() ); + ++it ; // skip dummy head + return it; + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( tail() ); + } + + /// Returns a forward const iterator addressing the first element in a list + //@{ + const_iterator begin() const + { + return get_const_begin(); + } + const_iterator cbegin() + { + return get_const_begin(); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a list + //@{ + const_iterator end() const + { + return get_const_end(); + } + const_iterator cend() + { + return get_const_end(); + } + //@} + + private: + //@cond + const_iterator get_const_begin() const + { + const_iterator it( const_cast( head() )); + ++it ; // skip dummy head + return it; + } + const_iterator get_const_end() const + { + return const_iterator( const_cast( tail() )); + } + //@endcond + + public: + /// Default constructor initializes empty list + LazyList() + { + static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); + + //m_pTail = cxx_allocator().New(); + head()->m_pNext.store( marked_node_ptr( tail() ), memory_model::memory_order_relaxed ); + } + + /// Destroys the list object + ~LazyList() + { + clear(); + assert( head()->m_pNext.load(memory_model::memory_order_relaxed).ptr() == tail() ); + head()->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); + } + + /// Inserts new node + /** + The function inserts \p val in the list if the list does not contain + an item with key equal to \p val. + + Returns \p true if \p val is linked into the list, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert_at( head(), val ); + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split new item creating into two part: + - create item with key only + - insert new item into the list + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. + While the functor \p f is working the item \p val is locked. + The user-defined functor is called only if the inserting is success and may be passed by reference + using boost::ref. + */ + template + bool insert( value_type& val, Func f ) + { + return insert_at( head(), val, f ); + } + + /// Ensures that the \p item exists in the list + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the list, then \p val is inserted into the list. 
+ Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. + + The functor may change non-key fields of the \p item. + While the functor \p f is working the item \p item is locked. + + You may pass \p func argument by reference using boost::ref or cds::ref. + + Returns std::pair where \p first is \p true if operation is successfull, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair ensure( value_type& val, Func func ) + { + return ensure_at( head(), val, func ); + } + + /// Unlinks the item \p val from the list + /** + The function searches the item \p val in the list and unlink it from the list + if it is found and it is equal to \p val. + + Difference between \ref erase and \p unlink functions: \p erase finds a key + and deletes the item found. \p unlink finds an item by key and deletes it + only if \p val is an item of that list, i.e. the pointer to item found + is equal to &val . + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + return unlink_at( head(), val ); + } + + /// Deletes the item from the list + /** \anchor cds_intrusive_LazyList_hp_erase_val + The function searches an item with key equal to \p val in the list, + unlinks it from the list, and returns \p true. + If the item with the key equal to \p val is not found the function return \p false. + */ + template + bool erase( Q const& val ) + { + return erase_at( head(), val, key_comparator() ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_LazyList_hp_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& val, Less pred ) + { + return erase_at( head(), val, cds::opt::details::make_comparator_from_less() ); + } + + /// Deletes the item from the list + /** \anchor cds_intrusive_LazyList_hp_erase_func + The function searches an item with key equal to \p val in the list, + call \p func functor with item found, unlinks it from the list, and returns \p true. + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + The functor may be passed by reference using boost:ref + + If the item with the key equal to \p val is not found the function return \p false. + */ + template + bool erase( const Q& val, Func func ) + { + return erase_at( head(), val, key_comparator(), func ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_LazyList_hp_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. 
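            For example, a suitable predicate might look like this (an illustrative sketch only:
            the item type \p foo ordered by an integer field \p nKey is an assumption, not part
            of the library):
            \code
            struct foo_less {
                // the predicate must accept value_type and the search key type in any order
                bool operator()( foo const& lhs, foo const& rhs ) const { return lhs.nKey < rhs.nKey; }
                bool operator()( foo const& lhs, int rhs ) const        { return lhs.nKey < rhs; }
                bool operator()( int lhs, foo const& rhs ) const        { return lhs < rhs.nKey; }
            };

            // ...
            theList.erase_with( 5, foo_less() );   // erases the item with key 5, if it is in the list
            \endcode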
+ */ + template + bool erase_with( const Q& val, Less pred, Func func ) + { + return erase_at( head(), val, cds::opt::details::make_comparator_from_less(), func ); + } + + /// Extracts the item from the list with specified \p key + /** \anchor cds_intrusive_LazyList_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the list, and returns it in \p dest parameter. + If the item with key equal to \p key is not found the function returns \p false. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + The \ref disposer specified in \p Traits class template parameter is called automatically + by garbage collector \p GC specified in class' template parameters when returned \ref guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::intrusive::LazyList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp; + theList.extract( gp, 5 ); + // Deal with gp + // ... + + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + bool extract( guarded_ptr& dest, Q const& key ) + { + return extract_at( head(), dest.guard(), key, key_comparator() ); + } + + /// Extracts the item from the list with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_LazyList_hp_extract "extract(guarded_ptr&, Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool extract_with( guarded_ptr& dest, Q const& key, Less pred ) + { + return extract_at( head(), dest.guard(), key, cds::opt::details::make_comparator_from_less() ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_LazyList_hp_find + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. + While the functor \p f is calling the item \p item is locked. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return find_at( head(), val, key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_LazyList_hp_find "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q& val, Less pred, Func f ) + { + return find_at( head(), val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_LazyList_hp_find_const + The function searches the item with key equal to \p val and calls the functor \p f for item found. 
+ The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the \p find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. + While the functor \p f is calling the item \p item is locked. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return find_at( head(), val, key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_LazyList_hp_find_const "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& val, Less pred, Func f ) + { + return find_at( head(), val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_LazyList_hp_find_val + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise + */ + template + bool find( Q const & val ) + { + return find_at( head(), val, key_comparator() ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_LazyList_hp_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& val, Less pred ) + { + return find_at( head(), val, cds::opt::details::make_comparator_from_less() ); + } + + /// Finds the key \p val and return the item found + /** \anchor cds_intrusive_LazyList_hp_get + The function searches the item with key equal to \p val + and assigns the item found to guarded pointer \p ptr. + The function returns \p true if \p val is found, and \p false otherwise. + If \p val is not found the \p ptr parameter is not changed. + + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC automatically when returned \ref guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::intrusive::LazyList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp; + if ( theList.get( gp, 5 )) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool get( guarded_ptr& ptr, Q const& val ) + { + return get_at( head(), ptr.guard(), val, key_comparator() ); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_intrusive_LazyList_hp_get "get( guarded_ptr& ptr, Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. 
+ */ + template + bool get_with( guarded_ptr& ptr, Q const& val, Less pred ) + { + return get_at( head(), ptr.guard(), val, cds::opt::details::make_comparator_from_less() ); + } + + /// Clears the list + /** + The function unlink all items from the list. + */ + void clear() + { + typename gc::Guard guard; + marked_node_ptr h; + while ( !empty() ) { + h = head()->m_pNext.load(memory_model::memory_order_relaxed); + guard.assign( node_traits::to_value_ptr( h.ptr() )); + if ( head()->m_pNext.load(memory_model::memory_order_acquire) == h ) { + head()->m_Lock.lock(); + h->m_Lock.lock(); + + unlink_node( head(), h.ptr(), head() ); + + h->m_Lock.unlock(); + head()->m_Lock.unlock(); + + retire_node( h.ptr() ) ; // free node + } + } + } + + /// Checks if the list is empty + bool empty() const + { + return head()->m_pNext.load(memory_model::memory_order_relaxed).ptr() == tail(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + protected: + //@cond + // split-list support + bool insert_aux_node( node_type * pNode ) + { + return insert_aux_node( head(), pNode ); + } + + // split-list support + bool insert_aux_node( node_type * pHead, node_type * pNode ) + { + assert( pNode != null_ptr() ); + + // Hack: convert node_type to value_type. + // In principle, auxiliary node can be non-reducible to value_type + // We assume that comparator can correctly distinguish aux and regular node. + return insert_at( pHead, *node_traits::to_value_ptr( pNode ) ); + } + + bool insert_at( node_type * pHead, value_type& val ) + { + link_checker::is_empty( node_traits::to_node_ptr( val ) ); + position pos; + key_comparator cmp; + + while ( true ) { + search( pHead, val, pos, key_comparator() ); + { + auto_lock_position alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != tail() && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // failed: key already in list + return false; + } + else { + link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); + ++m_ItemCounter; + return true; + } + } + } + } + } + + template + bool insert_at( node_type * pHead, value_type& val, Func f ) + { + link_checker::is_empty( node_traits::to_node_ptr( val ) ); + position pos; + key_comparator cmp; + + while ( true ) { + search( pHead, val, pos, key_comparator() ); + { + auto_lock_position alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != tail() && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // failed: key already in list + return false; + } + else { + link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); + cds::unref(f)( val ); + ++m_ItemCounter; + return true; + } + } + } + } + } + + template + std::pair ensure_at( node_type * pHead, value_type& val, Func func ) + { + position pos; + key_comparator cmp; + + while ( true ) { + search( pHead, val, pos, key_comparator() ); + { + auto_lock_position alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != tail() && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // key already in the list + + cds::unref(func)( false, *node_traits::to_value_ptr( *pos.pCur ) , val ); + return std::make_pair( true, false ); + } + else { + // new 
key + link_checker::is_empty( node_traits::to_node_ptr( val ) ); + + link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); + cds::unref(func)( true, val, val ); + ++m_ItemCounter; + return std::make_pair( true, true ); + } + } + } + } + } + + bool unlink_at( node_type * pHead, value_type& val ) + { + position pos; + key_comparator cmp; + + while ( true ) { + search( pHead, val, pos, key_comparator() ); + { + int nResult = 0; + { + auto_lock_position alp( pos ); + if ( validate( pos.pPred, pos.pCur ) ) { + if ( pos.pCur != tail() + && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 + && node_traits::to_value_ptr( pos.pCur ) == &val ) + { + // item found + unlink_node( pos.pPred, pos.pCur, pHead ); + --m_ItemCounter; + nResult = 1; + } + else + nResult = -1; + } + } + if ( nResult ) { + if ( nResult > 0 ) { + retire_node( pos.pCur ); + return true; + } + return false; + } + } + } + } + + template + bool erase_at( node_type * pHead, const Q& val, Compare cmp, Func f, position& pos ) + { + while ( true ) { + search( pHead, val, pos, cmp ); + { + int nResult = 0; + { + auto_lock_position alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != tail() && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // key found + unlink_node( pos.pPred, pos.pCur, pHead ); + cds::unref(f)( *node_traits::to_value_ptr( *pos.pCur ) ); + --m_ItemCounter; + nResult = 1; + } + else { + nResult = -1; + } + } + } + if ( nResult ) { + if ( nResult > 0 ) { + retire_node( pos.pCur ); + return true; + } + return false; + } + } + } + } + + template + bool erase_at( node_type * pHead, const Q& val, Compare cmp, Func f ) + { + position pos; + return erase_at( pHead, val, cmp, f, pos ); + } + + template + bool erase_at( node_type * pHead, const Q& val, Compare cmp ) + { + position pos; +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_at( pHead, val, cmp, [](value_type const &){}, pos ); +# else + return erase_at( pHead, val, cmp, empty_erase_functor(), pos ); +# endif + } + + template + bool extract_at( node_type * pHead, typename gc::Guard& gp, const Q& val, Compare cmp ) + { + position pos; + if ( +# ifdef CDS_CXX11_LAMBDA_SUPPORT + erase_at( pHead, val, cmp, [](value_type const &){}, pos ) +# else + erase_at( pHead, val, cmp, empty_erase_functor(), pos ) +# endif + ) + { + gp.assign( pos.guards.template get(position::guard_current_item) ); + return true; + } + return false; + } + + template + bool find_at( node_type * pHead, Q& val, Compare cmp, Func f ) + { + position pos; + + search( pHead, val, pos, cmp ); + if ( pos.pCur != tail() ) { + cds::lock::scoped_lock< typename node_type::lock_type> al( pos.pCur->m_Lock ); + if ( !pos.pCur->is_marked() + && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) + { + cds::unref(f)( *node_traits::to_value_ptr( *pos.pCur ), val ); + return true; + } + } + return false; + } + + template + bool find_at( node_type * pHead, Q const& val, Compare cmp ) + { + position pos; + + search( pHead, val, pos, cmp ); + return pos.pCur != tail() + && !pos.pCur->is_marked() + && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0; + } + + template + bool get_at( node_type * pHead, typename gc::Guard& gp, Q const& val, Compare cmp ) + { + position pos; + + search( pHead, val, pos, cmp ); + if ( pos.pCur != tail() + && !pos.pCur->is_marked() + && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) + { + gp.assign( pos.guards.template get( position::guard_current_item )); + return true; + } + return false; + } + + //@endcond + + 
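        Putting the public interface together, a minimal end-to-end sketch of declaring and using
        this list with \p cds::gc::HP might look as follows (the item type \p foo, the disposer
        \p foo_disposer and the \p foo_less predicate from the \p erase_with example above are
        assumptions for illustration; the usual libcds initialization of the Hazard Pointer GC
        and per-thread attachment is assumed as well):
        \code
        namespace ci = cds::intrusive;

        // Illustrative item type: derives from the lazy list base hook for gc::HP
        struct foo: public ci::lazy_list::base_hook< cds::opt::gc< cds::gc::HP > >
        {
            int nKey;
            foo( int key ): nKey( key ) {}
        };

        // Disposer called by the GC when a retired item may be safely deleted
        struct foo_disposer {
            void operator()( foo * p ) { delete p; }
        };

        typedef ci::LazyList< cds::gc::HP, foo,
            ci::lazy_list::make_traits<
                ci::opt::hook< ci::lazy_list::base_hook< cds::opt::gc< cds::gc::HP > > >
                ,cds::opt::less< foo_less >
                ,ci::opt::disposer< foo_disposer >
            >::type
        > foo_list;

        foo_list theList;
        theList.insert( *new foo( 10 ));    // the intrusive list links the item, it does not copy it

        foo_list::guarded_ptr gp;
        if ( theList.get( gp, 10 )) {
            // the item is protected by a hazard pointer while gp is alive
        }

        theList.erase( 10 );    // unlinks the item; foo_disposer deletes it later via the GC
        \endcode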
protected: + //@cond + template + void search( node_type * pHead, const Q& key, position& pos, Compare cmp ) + { + const node_type * pTail = tail(); + + marked_node_ptr pCur( pHead ); + marked_node_ptr pPrev( pHead ); + + back_off bkoff; + + while ( pCur.ptr() != pTail ) + { + if ( pCur.ptr() != pHead ) { + if ( cmp( *node_traits::to_value_ptr( *pCur.ptr() ), key ) >= 0 ) + break; + } + + pos.guards.copy( position::guard_prev_item, position::guard_current_item ); + pPrev = pCur; + + for (;;) { + pCur = pPrev->m_pNext.load(memory_model::memory_order_relaxed); + pos.guards.assign( position::guard_current_item, node_traits::to_value_ptr( pCur.ptr() )); + if ( pCur == pPrev->m_pNext.load(memory_model::memory_order_acquire) ) + break; + bkoff(); + } + assert( pCur.ptr() != null_ptr() ); + } + + pos.pCur = pCur.ptr(); + pos.pPred = pPrev.ptr(); + } + + static bool validate( node_type * pPred, node_type * pCur ) + { + return !pPred->is_marked() + && !pCur->is_marked() + && pPred->m_pNext.load(memory_model::memory_order_relaxed) == pCur; + } + + //@endcond + }; +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_LAZY_LIST_IMPL_H diff --git a/cds/intrusive/lazy_list_nogc.h b/cds/intrusive/lazy_list_nogc.h new file mode 100644 index 00000000..d0e9b5f5 --- /dev/null +++ b/cds/intrusive/lazy_list_nogc.h @@ -0,0 +1,681 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_LAZY_LIST_NOGC_H +#define __CDS_INTRUSIVE_LAZY_LIST_NOGC_H + +#include +#include + +namespace cds { namespace intrusive { + namespace lazy_list { + /// Lazy list node for gc::nogc + /** + Template parameters: + - Tag - a tag used to distinguish between different implementation + */ + template + struct node + { + typedef gc::nogc gc ; ///< Garbage collector + typedef Lock lock_type ; ///< Lock type + typedef Tag tag ; ///< tag + + CDS_ATOMIC::atomic m_pNext ; ///< pointer to the next node in the list + mutable lock_type m_Lock ; ///< Node lock + + node() + : m_pNext( null_ptr()) + {} + }; + } // namespace lazy_list + + + /// Lazy ordered single-linked list (template specialization for gc::nogc) + /** @ingroup cds_intrusive_list + \anchor cds_intrusive_LazyList_nogc + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of list item. + + See \ref cds_intrusive_LazyList_hp "LazyList" for description of template parameters. + + The interface of the specialization is a slightly different. + + The gc::nogc specialization of LazyList accepts following template argument list + \p Options of cds::intrusive::lazy_list::make_traits metafunction: + - opt::hook - hook used. Possible values are: lazy_list::base_hook, lazy_list::member_hook, lazy_list::traits_hook. + If the option is not specified, lazy_list::base_hook<> and gc::HP is used. + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. + - opt::disposer - the functor used for dispose removed items. Default is opt::v::empty_disposer. The disposer + provided is used only in \ref clear() function. + - opt::link_checker - the type of node's link fields checking. Default is \ref opt::debug_check_link + - opt::item_counter - the type of item counting feature. 
Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + + The opt::allocator and opt::back_off is not used for this specialization. + + */ + template < + typename T +#ifdef CDS_DOXYGEN_INVOKED + ,class Traits = lazy_list::type_traits +#else + ,class Traits +#endif + > + class LazyList + { + public: + typedef T value_type ; ///< type of value stored in the list + typedef Traits options ; ///< Traits template parameter + + typedef typename options::hook hook ; ///< hook type + typedef typename hook::node_type node_type ; ///< node type + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter. +# else + typedef typename opt::details::make_comparator< value_type, options >::type key_comparator; +# endif + + typedef typename options::disposer disposer ; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + typedef typename lazy_list::get_link_checker< node_type, options::link_checker >::type link_checker ; ///< link checker + + typedef gc::nogc gc ; ///< Garbage collector + typedef typename options::back_off back_off ; ///< back-off strategy (not used) + typedef typename options::item_counter item_counter ; ///< Item counting policy used + typedef typename options::memory_model memory_model; ///< C++ memory ordering (see lazy_list::type_traits::memory_model) + + //@cond + // Rebind options (split-list support) + template + struct rebind_options { + typedef LazyList< + gc + , value_type + , typename cds::opt::make_options< options, CDS_OPTIONS8>::type + > type; + }; + //@endcond + + protected: + typedef node_type * auxiliary_head ; ///< Auxiliary head type (for split-list support) + + protected: + node_type m_Head ; ///< List head (dummy node) + node_type m_Tail; ///< List tail (dummy node) + item_counter m_ItemCounter ; ///< Item counter + + //@cond + + /// Position pointer for item search + struct position { + node_type * pPred ; ///< Previous node + node_type * pCur ; ///< Current node + + /// Locks nodes \p pPred and \p pCur + void lock() + { + pPred->m_Lock.lock(); + pCur->m_Lock.lock(); + } + + /// Unlocks nodes \p pPred and \p pCur + void unlock() + { + pCur->m_Lock.unlock(); + pPred->m_Lock.unlock(); + } + }; + + class auto_lock_position { + position& m_pos; + public: + auto_lock_position( position& pos ) + : m_pos(pos) + { + pos.lock(); + } + ~auto_lock_position() + { + m_pos.unlock(); + } + }; + //@endcond + + protected: + //@cond + void clear_links( node_type * pNode ) + { + pNode->m_pNext.store( null_ptr(), memory_model::memory_order_relaxed ); + } + + template + void dispose_node( node_type * pNode, Disposer disp ) + { + clear_links( pNode ); + cds::unref(disp)( node_traits::to_value_ptr( *pNode )); + } + + template + void dispose_value( value_type& val, Disposer disp ) + { + dispose_node( node_traits::to_node_ptr( val ), disp ); + } + + void link_node( node_type * pNode, node_type * pPred, node_type * pCur ) + { + assert( pPred->m_pNext.load(memory_model::memory_order_relaxed) == pCur ); + + pNode->m_pNext.store( pCur, memory_model::memory_order_release ); + pPred->m_pNext.store( pNode, memory_model::memory_order_release ); + } + //@endcond + + protected: + //@cond + template + class iterator_type + { + 
friend class LazyList; + + protected: + value_type * m_pNode; + + void next() + { + assert( m_pNode != null_ptr() ); + + node_type * pNode = node_traits::to_node_ptr( m_pNode ); + node_type * pNext = pNode->m_pNext.load(memory_model::memory_order_relaxed); + if ( pNext != null_ptr() ) + m_pNode = node_traits::to_value_ptr( pNext ); + } + + iterator_type( node_type * pNode ) + { + m_pNode = node_traits::to_value_ptr( pNode ); + } + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + : m_pNode(null_ptr()) + {} + + iterator_type( const iterator_type& src ) + : m_pNode( src.m_pNode ) + {} + + value_ptr operator ->() const + { + return m_pNode; + } + + value_ref operator *() const + { + assert( m_pNode != null_ptr() ); + return *m_pNode; + } + + /// Pre-increment + iterator_type& operator ++() + { + next(); + return *this; + } + + /// Post-increment + iterator_type operator ++(int) + { + iterator_type i(*this); + next(); + return i; + } + + iterator_type& operator = (const iterator_type& src) + { + m_pNode = src.m_pNode; + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator_type const& i ) const + { + return m_pNode != i.m_pNode; + } + }; + //@endcond + + public: + /// Forward iterator + typedef iterator_type iterator; + /// Const forward iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + iterator it( &m_Head ); + ++it ; // skip dummy head + return it; + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( &m_Tail ); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + const_iterator it( const_cast( &m_Head )); + ++it ; // skip dummy head + return it; + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator( const_cast( &m_Tail )); + } + + public: + /// Default constructor initializes empty list + LazyList() + { + static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); + + m_Head.m_pNext.store( &m_Tail, memory_model::memory_order_relaxed ); + } + + /// Destroys the list object + ~LazyList() + { + clear(); + + assert( m_Head.m_pNext.load(memory_model::memory_order_relaxed) == &m_Tail ); + m_Head.m_pNext.store( null_ptr(), memory_model::memory_order_relaxed ); + } + + /// Inserts new node + /** + The function inserts \p val in the list if the list does not contain + an item with key equal to \p val. + + Returns \p true if \p val is linked into the list, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert_at( &m_Head, val ); + } + + /// Ensures that the \p item exists in the list + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the list, then \p val is inserted into the list. 
+ Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + struct functor { + void operator()( bool bNew, value_type& item, value_type& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refers to the same thing. + + The functor may change non-key fields of the \p item. + While the functor \p f is calling the item \p item is locked. + + You may pass \p func argument by reference using boost::ref or cds::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + + template + std::pair ensure( value_type& val, Func func ) + { + return ensure_at( &m_Head, val, func ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_LazyList_nogc_find_func + The function searches the item with key equal to \p val + and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. + While the functor \p f is calling the item found \p item is locked. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return find_at( &m_Head, val, key_comparator(), f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_LazyList_nogc_find_cfunc + The function searches the item with key equal to \p val + and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. + While the functor \p f is calling the item found \p item is locked. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return find_at( &m_Head, val, key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_LazyList_nogc_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& val, Less pred, Func f ) + { + return find_at( &m_Head, val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_LazyList_nogc_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. 
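            For example, the functor may update a non-key field of the item found while the node
            is locked (an illustrative sketch; the item type \p foo with fields \p nKey and \p nVal
            and the \p foo_less predicate are assumptions, not part of the library):
            \code
            struct update_val {
                int nNewVal;
                update_val( int v ): nNewVal( v ) {}

                void operator()( foo& item, int /*key*/ )
                {
                    // the found node is locked here, so a non-key field may be changed safely
                    item.nVal = nNewVal;
                }
            };

            // ...
            int key = 10;
            theList.find_with( key, foo_less(), update_val( 42 ));
            \endcode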
+ */ + template + bool find_with( Q& val, Less pred, Func f ) + { + return find_at( &m_Head, val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_LazyList_nogc_find_val + The function searches the item with key equal to \p val + and returns pointer to value found or \p NULL. + */ + template + value_type * find( Q const& val ) + { + return find_at( &m_Head, val, key_comparator() ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_LazyList_nogc_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + value_type * find_with( Q const & val, Less pred ) + { + return find_at( &m_Head, val, cds::opt::details::make_comparator_from_less() ); + } + + /// Clears the list + /** + The function unlink all items from the list. + For each unlinked item the item disposer \p disp is called after unlinking. + + This function is not thread-safe. + */ + template + void clear( Disposer disp ) + { + node_type * pHead = m_Head.m_pNext.exchange( &m_Tail, memory_model::memory_order_release ); + + while ( pHead != &m_Tail ) { + node_type * p = pHead->m_pNext.load(memory_model::memory_order_relaxed); + dispose_node( pHead, disp ); + pHead = p; + } + } + + /// Clears the list using default disposer + /** + The function clears the list using default (provided in class template) disposer functor. + */ + void clear() + { + clear( disposer() ); + } + + /// Checks if the list is empty + bool empty() const + { + return m_Head.m_pNext.load(memory_model::memory_order_relaxed) == &m_Tail; + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + protected: + //@cond + // split-list support + bool insert_aux_node( node_type * pNode ) + { + return insert_aux_node( &m_Head, pNode ); + } + + // split-list support + bool insert_aux_node( node_type * pHead, node_type * pNode ) + { + assert( pHead != null_ptr() ); + assert( pNode != null_ptr() ); + + // Hack: convert node_type to value_type. + // In principle, auxiliary node can be non-reducible to value_type + // We assume that comparator can correctly distinguish aux and regular node. 
+ return insert_at( pHead, *node_traits::to_value_ptr( pNode ) ); + } + + bool insert_at( node_type * pHead, value_type& val ) + { + link_checker::is_empty( node_traits::to_node_ptr( val ) ); + position pos; + key_comparator cmp; + + while ( true ) { + search( pHead, val, pos, key_comparator() ); + { + auto_lock_position alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // failed: key already in list + return false; + } + else { + link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); + ++m_ItemCounter; + return true; + } + } + } + } + } + + iterator insert_at_( node_type * pHead, value_type& val ) + { + if ( insert_at( pHead, val )) + return iterator( node_traits::to_node_ptr( val )); + return end(); + } + + + template + std::pair ensure_at_( node_type * pHead, value_type& val, Func func ) + { + position pos; + key_comparator cmp; + + while ( true ) { + search( pHead, val, pos, key_comparator() ); + { + auto_lock_position alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // key already in the list + + cds::unref(func)( false, *node_traits::to_value_ptr( *pos.pCur ) , val ); + return std::make_pair( iterator( pos.pCur ), false ); + } + else { + // new key + link_checker::is_empty( node_traits::to_node_ptr( val ) ); + + link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); + cds::unref(func)( true, val, val ); + ++m_ItemCounter; + return std::make_pair( iterator( node_traits::to_node_ptr( val )), true ); + } + } + } + } + } + + template + std::pair ensure_at( node_type * pHead, value_type& val, Func func ) + { + std::pair ret = ensure_at_( pHead, val, func ); + return std::make_pair( ret.first != end(), ret.second ); + } + + template + bool find_at( node_type * pHead, Q& val, Compare cmp, Func f ) + { + position pos; + + search( pHead, val, pos, cmp ); + if ( pos.pCur != &m_Tail ) { + cds::lock::scoped_lock< typename node_type::lock_type> al( pos.pCur->m_Lock ); + if ( cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) + { + cds::unref(f)( *node_traits::to_value_ptr( *pos.pCur ), val ); + return true; + } + } + return false; + } + + template + value_type * find_at( node_type * pHead, Q& val, Compare cmp) + { + iterator it = find_at_( pHead, val, cmp ); + if ( it != end() ) + return &*it; + return null_ptr(); + } + + template + iterator find_at_( node_type * pHead, Q& val, Compare cmp) + { + position pos; + + search( pHead, val, pos, cmp ); + if ( pos.pCur != &m_Tail ) { + cds::lock::scoped_lock< typename node_type::lock_type> al( pos.pCur->m_Lock ); + if ( cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) + { + return iterator( pos.pCur ); + } + } + return end(); + } + + //@endcond + + protected: + //@cond + template + void search( node_type * pHead, const Q& key, position& pos, Compare cmp ) + { + const node_type * pTail = &m_Tail; + + node_type * pCur = pHead; + node_type * pPrev = pHead; + + while ( pCur != pTail && ( pCur == pHead || cmp( *node_traits::to_value_ptr( *pCur ), key ) < 0 )) { + pPrev = pCur; + pCur = pCur->m_pNext.load(memory_model::memory_order_acquire); + } + + pos.pCur = pCur; + pos.pPred = pPrev; + } + + static bool validate( node_type * pPred, node_type * pCur ) + { + return pPred->m_pNext.load(memory_model::memory_order_acquire) == pCur; + } + + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef 
__CDS_INTRUSIVE_LAZY_LIST_NOGC_H diff --git a/cds/intrusive/lazy_list_ptb.h b/cds/intrusive/lazy_list_ptb.h new file mode 100644 index 00000000..49e4cb45 --- /dev/null +++ b/cds/intrusive/lazy_list_ptb.h @@ -0,0 +1,9 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_LAZY_LIST_PTB_H +#define __CDS_INTRUSIVE_LAZY_LIST_PTB_H + +#include +#include + +#endif // #ifndef __CDS_INTRUSIVE_LAZY_LIST_PTB_H diff --git a/cds/intrusive/lazy_list_rcu.h b/cds/intrusive/lazy_list_rcu.h new file mode 100644 index 00000000..77d8b1d4 --- /dev/null +++ b/cds/intrusive/lazy_list_rcu.h @@ -0,0 +1,1227 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_LAZY_LIST_RCU_H +#define __CDS_INTRUSIVE_LAZY_LIST_RCU_H + +#include +#include +#include +#include + +namespace cds { namespace intrusive { + namespace lazy_list { + /// Lazy list node for \ref cds_urcu_desc "RCU" + /** + Template parameters: + - Tag - a tag used to distinguish between different implementation + */ + template + struct node, Lock, Tag> + { + typedef cds::urcu::gc gc ; ///< RCU schema + typedef Lock lock_type ; ///< Lock type + typedef Tag tag ; ///< tag + + typedef cds::details::marked_ptr marked_ptr ; ///< marked pointer + typedef CDS_ATOMIC::atomic atomic_marked_ptr ; ///< atomic marked pointer specific for GC + + atomic_marked_ptr m_pNext ; ///< pointer to the next node in the list + mutable lock_type m_Lock ; ///< Node lock + + /// Checks if node is marked + bool is_marked() const + { + return m_pNext.load(CDS_ATOMIC::memory_order_relaxed).bits() != 0; + } + + /// Default ctor + node() + : m_pNext( null_ptr()) + {} + + /// Clears internal fields + void clear() + { + m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_release ); + } + }; + } // namespace lazy_list + + + /// Lazy ordered single-linked list (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_intrusive_list + \anchor cds_intrusive_LazyList_rcu + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p T - type to be stored in the list + - \p Traits - type traits. See lazy_list::type_traits for explanation. + + It is possible to declare option-based list with \p %cds::intrusive::lazy_list::make_traits metafunction istead of \p Traits template + argument. Template argument list \p Options of cds::intrusive::lazy_list::make_traits metafunction are: + - opt::hook - hook used. Possible values are: lazy_list::base_hook, lazy_list::member_hook, lazy_list::traits_hook. + If the option is not specified, lazy_list::base_hook<> is used. + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. + - opt::disposer - the functor used for dispose removed items. Default is opt::v::empty_disposer + - opt::rcu_check_deadlock - a deadlock checking policy. Default is opt::v::rcu_throw_deadlock + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). 
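    For example, an option-based declaration for the general-purpose buffered RCU might look
    like this (a sketch only; the item type \p Foo is assumed to derive from
    \p lazy_list::base_hook for this RCU type, and \p Foo_less is an assumed ordering predicate):
    \code
    #include <cds/urcu/general_buffered.h>
    #include <cds/intrusive/lazy_list_rcu.h>

    typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;

    typedef cds::intrusive::LazyList< rcu_type, Foo,
        cds::intrusive::lazy_list::make_traits<
            cds::intrusive::opt::hook<
                cds::intrusive::lazy_list::base_hook< cds::opt::gc< rcu_type > >
            >
            ,cds::opt::less< Foo_less >
        >::type
    > rcu_lazy_list;
    \endcode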
+ + \par Usage + Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" you should include: + \code + #include + #include + + // Now, you can declare lazy list for type Foo and default traits: + typedef cds::intrusive::LazyList >, Foo > rcu_lazy_list; + \endcode + + */ + template < + typename RCU + ,typename T +#ifdef CDS_DOXYGEN_INVOKED + ,class Traits = lazy_list::type_traits +#else + ,class Traits +#endif + > + class LazyList, T, Traits> + { + public: + typedef T value_type ; ///< type of value stored in the list + typedef Traits options ; ///< Traits template parameter + + typedef typename options::hook hook ; ///< hook type + typedef typename hook::node_type node_type ; ///< node type + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key compare functor based on opt::compare and opt::less option setter. +# else + typedef typename opt::details::make_comparator< value_type, options >::type key_comparator; +# endif + + typedef typename options::disposer disposer ; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + typedef typename lazy_list::get_link_checker< node_type, options::link_checker >::type link_checker ; ///< link checker + + typedef cds::urcu::gc gc ; ///< RCU schema + typedef typename options::back_off back_off ; ///< back-off strategy (not used) + typedef typename options::item_counter item_counter ; ///< Item counting policy used + typedef typename options::memory_model memory_model ; ///< C++ memory ordering (see lazy_list::type_traits::memory_model) + typedef typename options::rcu_check_deadlock rcu_check_deadlock ; ///< Deadlock checking policy + + typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock + static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = true; ///< Group of \p extract_xxx functions require external locking + + //@cond + // Rebind options (split-list support) + template + struct rebind_options { + typedef LazyList< + gc + , value_type + , typename cds::opt::make_options< options, CDS_OPTIONS8>::type + > type; + }; + //@endcond + + protected: + typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer + typedef node_type * auxiliary_head ; ///< Auxiliary head type (for split-list support) + + protected: + node_type m_Head ; ///< List head (dummy node) + node_type m_Tail; ///< List tail (dummy node) + item_counter m_ItemCounter ; ///< Item counter + + //@cond + + /// Position pointer for item search + struct position { + node_type * pPred ; ///< Previous node + node_type * pCur ; ///< Current node + + /// Locks nodes \p pPred and \p pCur + void lock() + { + pPred->m_Lock.lock(); + pCur->m_Lock.lock(); + } + + /// Unlocks nodes \p pPred and \p pCur + void unlock() + { + pCur->m_Lock.unlock(); + pPred->m_Lock.unlock(); + } + }; + + class auto_lock_position { + position& m_pos; + public: + auto_lock_position( position& pos ) + : m_pos(pos) + { + pos.lock(); + } + ~auto_lock_position() + { + m_pos.unlock(); + } + }; + + typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_erase_functor { + void operator()( value_type const& item ) + {} + }; + + struct get_functor { + value_type * pFound; + + get_functor() + : pFound(null_ptr()) + {} 
+ + template + void operator()( value_type& item, Q& val ) + { + pFound = &item; + } + }; +# endif + + //@endcond + + protected: + //@cond + static void clear_links( node_type * pNode ) + { + pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); + } + + struct clear_and_dispose { + void operator()( value_type * p ) + { + assert( p != null_ptr() ); + clear_links( node_traits::to_node_ptr(p)); + disposer()( p ); + } + }; + + static void dispose_node( node_type * pNode ) + { + assert( pNode ); + assert( !gc::is_locked() ); + + gc::template retire_ptr( node_traits::to_value_ptr( *pNode ) ); + } + + void link_node( node_type * pNode, node_type * pPred, node_type * pCur ) + { + assert( pPred->m_pNext.load(memory_model::memory_order_relaxed).ptr() == pCur ); + + pNode->m_pNext.store( marked_node_ptr(pCur), memory_model::memory_order_release ); + pPred->m_pNext.store( marked_node_ptr(pNode), memory_model::memory_order_release ); + } + + void unlink_node( node_type * pPred, node_type * pCur, node_type * pHead ) + { + assert( pPred->m_pNext.load(memory_model::memory_order_relaxed).ptr() == pCur ); + assert( pCur != &m_Tail ); + + node_type * pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed).ptr(); + //pCur->m_pNext.store( marked_node_ptr( pNext, 1), memory_model::memory_order_relaxed) ; // logically deleting + pCur->m_pNext.store( marked_node_ptr( pHead, 1 ), memory_model::memory_order_relaxed ) ; // logical deletion + back-link for search + pPred->m_pNext.store( marked_node_ptr( pNext ), memory_model::memory_order_relaxed); // physically deleting + //pCur->m_pNext.store( marked_node_ptr( pHead, 1 ), memory_model::memory_order_relaxed ) ; // back-link for search + } + + //@endcond + + public: + typedef cds::urcu::exempt_ptr< gc, value_type, value_type, clear_and_dispose, void > exempt_ptr ; ///< pointer to extracted node + + protected: + //@cond + template + class iterator_type + { + friend class LazyList; + + protected: + value_type * m_pNode; + + void next() + { + assert( m_pNode != null_ptr() ); + + node_type * pNode = node_traits::to_node_ptr( m_pNode ); + node_type * pNext = pNode->m_pNext.load(memory_model::memory_order_relaxed).ptr(); + if ( pNext != null_ptr() ) + m_pNode = node_traits::to_value_ptr( pNext ); + } + + void skip_deleted() + { + if ( m_pNode != null_ptr() ) { + node_type * pNode = node_traits::to_node_ptr( m_pNode ); + + // Dummy tail node could not be marked + while ( pNode->is_marked() ) + pNode = pNode->m_pNext.load(memory_model::memory_order_relaxed).ptr(); + + if ( pNode != node_traits::to_node_ptr( m_pNode ) ) + m_pNode = node_traits::to_value_ptr( pNode ); + } + } + + iterator_type( node_type * pNode ) + { + m_pNode = node_traits::to_value_ptr( pNode ); + skip_deleted(); + } + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + : m_pNode(null_ptr()) + {} + + iterator_type( iterator_type const& src ) + : m_pNode( src.m_pNode ) + {} + + value_ptr operator ->() const + { + return m_pNode; + } + + value_ref operator *() const + { + assert( m_pNode != null_ptr() ); + return *m_pNode; + } + + /// Pre-increment + iterator_type& operator ++() + { + next(); + skip_deleted(); + return *this; + } + + /// Post-increment + iterator_type operator ++(int) + { + iterator_type i(*this); + next(); + skip_deleted(); + return i; + } + + iterator_type& operator = (iterator_type const& src) + { + m_pNode = src.m_pNode; + return *this; + 
} + + template + bool operator ==(iterator_type const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator_type const& i ) const + { + return m_pNode != i.m_pNode; + } + }; + //@endcond + + public: + /// Forward iterator + typedef iterator_type iterator; + /// Const forward iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + iterator it( &m_Head ); + ++it ; // skip dummy head + return it; + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( &m_Tail ); + } + + /// Returns a forward const iterator addressing the first element in a list + //@{ + const_iterator begin() const + { + return get_const_begin(); + } + const_iterator cbegin() + { + return get_const_begin(); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a list + //@{ + const_iterator end() const + { + return get_const_end(); + } + const_iterator cend() + { + return get_const_end(); + } + //@} + + private: + //@cond + const_iterator get_const_begin() const + { + const_iterator it( const_cast( &m_Head )); + ++it ; // skip dummy head + return it; + } + const_iterator get_const_end() const + { + return const_iterator( const_cast( &m_Tail )); + } + //@endcond + + public: + /// Default constructor initializes empty list + LazyList() + { + static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); + m_Head.m_pNext.store( marked_node_ptr( &m_Tail ), memory_model::memory_order_relaxed ); + } + + /// Destroys the list object + ~LazyList() + { + clear(); + + assert( m_Head.m_pNext.load(memory_model::memory_order_relaxed).ptr() == &m_Tail ); + m_Head.m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); + } + + /// Inserts new node + /** + The function inserts \p val in the list if the list does not contain + an item with key equal to \p val. + + Returns \p true if \p val is linked into the list, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert_at( &m_Head, val ); + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split new item creating into two part: + - create item with key only + - insert new item into the list + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. + While the functor \p f is working the item \p val is locked. + The user-defined functor is called only if the inserting is success and may be passed by reference + using boost::ref. + */ + template + bool insert( value_type& val, Func f ) + { + return insert_at( &m_Head, val, f ); + } + + /// Ensures that the \p item exists in the list + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the list, then \p val is inserted into the list. + Otherwise, the functor \p func is called with item found. 
+ The functor signature is: + \code + struct functor { + void operator()( bool bNew, value_type& item, value_type& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refers to the same thing. + + The functor may change non-key fields of the \p item. + While the functor \p f is calling the item \p item is locked. + + You may pass \p func argument by reference using boost::ref or cds::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + + template + std::pair ensure( value_type& val, Func func ) + { + return ensure_at( &m_Head, val, func ); + } + + /// Unlinks the item \p val from the list + /** + The function searches the item \p val in the list and unlink it from the list + if it is found and it is equal to \p val. + + Difference between \ref erase and \p unlink functions: \p erase finds a key + and deletes the item found. \p unlink finds an item by key and deletes it + only if \p val is an item of that list, i.e. the pointer to item found + is equal to &val . + + The function returns \p true if success and \p false otherwise. + + RCU \p synchronize method can be called. + Note that depending on RCU type used the \ref disposer call can be deferred. + + The function can throw cds::urcu::rcu_deadlock exception if deadlock is encountered and + deadlock checking policy is opt::v::rcu_throw_deadlock. + */ + bool unlink( value_type& val ) + { + return unlink_at( &m_Head, val ); + } + + /// Deletes the item from the list + /** \anchor cds_intrusive_LazyList_rcu_find_erase + The function searches an item with key equal to \p val in the list, + unlinks it from the list, and returns \p true. + If the item with the key equal to \p val is not found the function return \p false. + + RCU \p synchronize method can be called. + Note that depending on RCU type used the \ref disposer call can be deferred. + + The function can throw \ref cds_urcu_rcu_deadlock "cds::urcu::rcu_deadlock" exception if deadlock is encountered and + deadlock checking policy is opt::v::rcu_throw_deadlock. + */ + template + bool erase( Q const& val ) + { + return erase_at( &m_Head, val, key_comparator() ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_LazyList_rcu_find_erase "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& val, Less pred ) + { + return erase_at( &m_Head, val, cds::opt::details::make_comparator_from_less()); + } + + + /// Deletes the item from the list + /** \anchor cds_intrusive_LazyList_rcu_find_erase_func + The function searches an item with key equal to \p val in the list, + call \p func functor with item found, unlinks it from the list, and returns \p true. + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + The functor may be passed by reference using boost:ref + + If the item with the key equal to \p val is not found the function return \p false. 
+ + RCU \p synchronize method can be called. + Note that depending on RCU type used the \ref disposer call can be deferred. + + The function can throw \ref cds_urcu_rcu_deadlock "cds::urcu::rcu_deadlock" exception if deadlock is encountered and + deadlock checking policy is opt::v::rcu_throw_deadlock. + */ + template + bool erase( Q const& val, Func func ) + { + return erase_at( &m_Head, val, key_comparator(), func ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_LazyList_rcu_find_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& val, Less pred, Func func ) + { + return erase_at( &m_Head, val, cds::opt::details::make_comparator_from_less(), func ); + } + + /// Extracts an item from the list + /** + \anchor cds_intrusive_LazyList_rcu_extract + The function searches an item with key equal to \p val in the list, + unlinks it from the list, and returns pointer to an item found in \p dest parameter. + If the item with the key equal to \p val is not found the function returns \p false, + \p dest is empty. + + @note The function does NOT call RCU read-side lock or synchronization, + and does NOT dispose the item found. It just excludes the item from the list + and returns a pointer to item found. + You should lock RCU before calling this function, and you should manually synchronize RCU + outside the RCU lock region before reusing returned pointer. + + \code + #include + #include + + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::intrusive::LazyList< rcu, Foo > rcu_lazy_list; + + rcu_lazy_list theList; + // ... + + rcu_lazy_list::exempt_ptr p1; + { + // first, we should lock RCU + rcu::scoped_lock sl; + + // Now, you can apply extract function + // Note that you must not delete the item found inside the RCU lock + if ( theList.extract( p1, 10 )) { + // do something with p1 + ... + } + } + + // We may safely release p1 here + // release() passes the pointer to RCU reclamation cycle: + // it invokes RCU retire_ptr function with the disposer you provided for the list. + p1.release(); + \endcode + */ + template + bool extract( exempt_ptr& dest, Q const& val ) + { + dest = extract_at( &m_Head, val, key_comparator() ); + return !dest.empty(); + } + + /// Extracts an item from the list using \p pred predicate for searching + /** + This function is the analog for \ref cds_intrusive_LazyList_rcu_extract "extract(exempt_ptr&, Q const&)". + + The \p pred is a predicate used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as \ref key_comparator. + */ + template + bool extract_with( exempt_ptr& dest, Q const& val, Less pred ) + { + dest = extract_at( &m_Head, val, cds::opt::details::make_comparator_from_less() ); + return !dest.empty(); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_LazyList_rcu_find_func + The function searches the item with key equal to \p val + and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. 
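+
+        A minimal illustrative sketch (names are hypothetical; it assumes \ref key_comparator
+        can compare an item with an int key):
+        \code
+        struct find_functor {
+            void operator()( value_type& item, int& key )
+            {
+                // the item is locked here, so non-key fields may be updated safely
+            }
+        };
+        int nKey = 5;
+        bool bFound = theList.find( nKey, find_functor() );
+        \endcode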
+ + The functor may change non-key fields of \p item. + While the functor \p f is calling the item found \p item is locked. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) const + { + return find_at( const_cast( &m_Head ), val, key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_LazyList_rcu_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q& val, Less pred, Func f ) const + { + return find_at( const_cast( &m_Head ), val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_LazyList_rcu_find_cfunc + The function searches the item with key equal to \p val + and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. + While the functor \p f is calling the item found \p item is locked. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) const + { + return find_at( const_cast( &m_Head ), val, key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_LazyList_rcu_find_cfunc "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& val, Less pred, Func f ) const + { + return find_at( const_cast( &m_Head ), val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_LazyList_rcu_find_val + The function searches the item with key equal to \p val + and returns \p true if \p val found or \p false otherwise. + */ + template + bool find( Q const& val ) const + { + return find_at( const_cast( &m_Head ), val, key_comparator() ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_LazyList_rcu_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& val, Less pred ) const + { + return find_at( const_cast( &m_Head ), val, cds::opt::details::make_comparator_from_less() ); + } + + /// Finds the key \p val and return the item found + /** \anchor cds_intrusive_LazyList_rcu_get + The function searches the item with key equal to \p val and returns the pointer to item found. + If \p val is not found it returns \p NULL. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. 
+ Returned item is valid only while RCU is locked: + \code + typedef cds::intrusive::LazyList< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > ord_list; + ord_list theList; + // ... + { + // Lock RCU + ord_list::rcu_lock lock; + + foo * pVal = theList.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + // Unlock RCU by rcu_lock destructor + // pVal can be retired by disposer at any time after RCU has been unlocked + } + \endcode + */ + template + value_type * get( Q const& val ) const + { + return get_at( const_cast( &m_Head ), val, key_comparator()); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_intrusive_LazyList_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + value_type * get_with( Q const& val, Less pred ) const + { + return get_at( const_cast( &m_Head ), val, cds::opt::details::make_comparator_from_less()); + } + + /// Clears the list using default disposer + /** + The function clears the list using default (provided in class template) disposer functor. + + RCU \p synchronize method can be called. + Note that depending on RCU type used the \ref disposer call can be deferred. + + The function can throw cds::urcu::rcu_deadlock exception if deadlock is encountered and + deadlock checking policy is opt::v::rcu_throw_deadlock. + */ + void clear() + { + if( !empty() ) { + check_deadlock_policy::check(); + + node_type * pHead; + for (;;) { + { + rcu_lock l; + pHead = m_Head.m_pNext.load(memory_model::memory_order_acquire).ptr(); + if ( pHead == &m_Tail ) + break; + + m_Head.m_Lock.lock(); + pHead->m_Lock.lock(); + + if ( m_Head.m_pNext.load(memory_model::memory_order_relaxed).all() == pHead ) + unlink_node( &m_Head, pHead, &m_Head ); + + pHead->m_Lock.unlock(); + m_Head.m_Lock.unlock(); + } + + --m_ItemCounter; + dispose_node( pHead ); + } + } + } + + /// Checks if the list is empty + bool empty() const + { + return m_Head.m_pNext.load(memory_model::memory_order_relaxed).ptr() == &m_Tail; + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + protected: + //@cond + // split-list support + bool insert_aux_node( node_type * pNode ) + { + return insert_aux_node( &m_Head, pNode ); + } + + // split-list support + bool insert_aux_node( node_type * pHead, node_type * pNode ) + { + assert( pHead != null_ptr() ); + assert( pNode != null_ptr() ); + + // Hack: convert node_type to value_type. + // In principle, auxiliary node can be non-reducible to value_type + // We assume that comparator can correctly distinguish aux and regular node. 
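+            // The conversion below therefore treats the auxiliary node as an ordinary
+            // value so that it can be linked through the regular insert_at() path.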
+ return insert_at( pHead, *node_traits::to_value_ptr( pNode ) ); + } + + bool insert_at( node_type * pHead, value_type& val, bool bLock = true ) + { + link_checker::is_empty( node_traits::to_node_ptr( val ) ); + position pos; + key_comparator cmp; + + rcu_lock l( bLock ); + while ( true ) { + search( pHead, val, pos ); + { + auto_lock_position alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // failed: key already in list + return false; + } + else { + link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); + ++m_ItemCounter; + return true; + } + } + } + } + } + + template + bool insert_at( node_type * pHead, value_type& val, Func f ) + { + link_checker::is_empty( node_traits::to_node_ptr( val ) ); + position pos; + key_comparator cmp; + + rcu_lock l; + while ( true ) { + search( pHead, val, pos ); + { + auto_lock_position alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // failed: key already in list + return false; + } + else { + link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); + cds::unref(f)( val ); + ++m_ItemCounter; + return true; + } + } + } + } + } + + iterator insert_at_( node_type * pHead, value_type& val, bool bLock = true ) + { + rcu_lock l( bLock ); + if ( insert_at( pHead, val, false )) + return iterator( node_traits::to_node_ptr( val )); + return end(); + } + + + template + std::pair ensure_at_( node_type * pHead, value_type& val, Func func, bool bLock = true ) + { + position pos; + key_comparator cmp; + + rcu_lock l( bLock ); + while ( true ) { + search( pHead, val, pos ); + { + auto_lock_position alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // key already in the list + + cds::unref(func)( false, *node_traits::to_value_ptr( *pos.pCur ) , val ); + return std::make_pair( iterator( pos.pCur ), false ); + } + else { + // new key + link_checker::is_empty( node_traits::to_node_ptr( val ) ); + + link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); + cds::unref(func)( true, val, val ); + ++m_ItemCounter; + return std::make_pair( iterator( node_traits::to_node_ptr( val )), true ); + } + } + } + } + } + + template + std::pair ensure_at( node_type * pHead, value_type& val, Func func, bool bLock = true ) + { + rcu_lock l( bLock ); + std::pair ret = ensure_at_( pHead, val, func, false ); + return std::make_pair( ret.first != end(), ret.second ); + } + + bool unlink_at( node_type * pHead, value_type& val ) + { + position pos; + key_comparator cmp; + check_deadlock_policy::check(); + + while ( true ) { + int nResult = 0; + { + rcu_lock l; + search( pHead, val, pos ); + { + auto_lock_position alp( pos ); + if ( validate( pos.pPred, pos.pCur ) ) { + if ( pos.pCur != &m_Tail + && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 + && node_traits::to_value_ptr( pos.pCur ) == &val ) + { + // item found + unlink_node( pos.pPred, pos.pCur, pHead ); + --m_ItemCounter; + nResult = 1; + } + else + nResult = -1; + } + } + } + + if ( nResult ) { + if ( nResult > 0 ) { + dispose_node( pos.pCur ); + return true; + } + return false; + } + } + } + + template + bool erase_at( node_type * pHead, Q const& val, Compare cmp, Func f, position& pos ) + { + check_deadlock_policy::check(); + + while ( true ) { + int nResult = 0; + { + rcu_lock l; + search( pHead, val, 
pos, cmp ); + { + auto_lock_position alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // key found + unlink_node( pos.pPred, pos.pCur, pHead ); + cds::unref(f)( *node_traits::to_value_ptr( *pos.pCur ) ); + --m_ItemCounter; + nResult = 1; + } + else { + nResult = -1; + } + } + } + } + + if ( nResult ) { + if ( nResult > 0 ) { + dispose_node( pos.pCur ); + return true; + } + return false; + } + } + } + + template + bool erase_at( node_type * pHead, Q const& val, Compare cmp, Func f ) + { + position pos; + return erase_at( pHead, val, cmp, f, pos ); + } + + template + bool erase_at( node_type * pHead, Q const& val, Compare cmp ) + { + position pos; +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_at( pHead, val, cmp, [](value_type const &){}, pos ); +# else + return erase_at( pHead, val, cmp, empty_erase_functor(), pos ); +# endif + } + + template + value_type * extract_at( node_type * pHead, Q const& val, Compare cmp ) + { + position pos; + assert( gc::is_locked() ) ; // RCU must be locked!!! + + while ( true ) { + search( pHead, val, pos, cmp ); + int nResult = 0; + { + auto_lock_position alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // key found + unlink_node( pos.pPred, pos.pCur, pHead ); + --m_ItemCounter; + nResult = 1; + } + else { + nResult = -1; + } + } + } + + if ( nResult ) { + if ( nResult > 0 ) + return node_traits::to_value_ptr( pos.pCur ); + return null_ptr(); + } + } + } + + template + bool find_at( node_type * pHead, Q& val, Compare cmp, Func f, bool bLock = true ) const + { + position pos; + + rcu_lock l( bLock ); + search( pHead, val, pos, cmp ); + if ( pos.pCur != &m_Tail ) { + cds::lock::scoped_lock< typename node_type::lock_type> al( pos.pCur->m_Lock ); + if ( cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) + { + cds::unref(f)( *node_traits::to_value_ptr( *pos.pCur ), val ); + return true; + } + } + return false; + } + + template + bool find_at( node_type * pHead, Q& val, Compare cmp ) const + { + rcu_lock l; + return find_at_( pHead, val, cmp ) != end(); + } + + template + const_iterator find_at_( node_type * pHead, Q& val, Compare cmp ) const + { + assert( gc::is_locked() ); + + position pos; + + search( pHead, val, pos, cmp ); + if ( pos.pCur != &m_Tail ) { + cds::lock::scoped_lock< typename node_type::lock_type> al( pos.pCur->m_Lock ); + if ( cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) + { + return const_iterator( pos.pCur ); + } + } + return end(); + } + + template + value_type * get_at( node_type * pHead, Q const& val, Compare cmp ) const + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + value_type * pFound = null_ptr(); + return find_at( pHead, val, cmp, [&pFound](value_type& found, Q const& ) { pFound = &found; } ) + ? pFound : null_ptr(); +# else + get_functor gf; + return find_at( pHead , val, cmp, cds::ref(gf) ) ? gf.pFound : null_ptr(); +# endif + } + + //@endcond + + protected: + //@cond + template + void search( node_type * pHead, Q const& key, position& pos ) const + { + search( pHead, key, pos, key_comparator() ); + } + + template + void search( node_type * pHead, Q const& key, position& pos, Compare cmp ) const + { + // RCU should be locked!!! 
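+            // The traversal below is a plain ordered walk from pHead towards m_Tail;
+            // RCU keeps the traversed nodes allocated while the read-side lock is held,
+            // and callers re-check the position under the node locks before acting.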
+ assert( gc::is_locked() ); + + node_type const* pTail = &m_Tail; + + marked_node_ptr pCur(pHead); + marked_node_ptr pPrev(pHead); + + while ( pCur.ptr() != pTail && ( pCur.ptr() == pHead || cmp( *node_traits::to_value_ptr( *pCur.ptr() ), key ) < 0 )) { + pPrev = pCur; + pCur = pCur->m_pNext.load(memory_model::memory_order_relaxed); + } + + pos.pCur = pCur.ptr(); + pos.pPred = pPrev.ptr(); + } + + static bool validate( node_type * pPred, node_type * pCur ) + { + // RCU lock should be locked!!! + assert( gc::is_locked() ); + + return !pPred->is_marked() + && !pCur->is_marked() + && pPred->m_pNext.load(memory_model::memory_order_relaxed) == pCur; + } + + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_LAZY_LIST_RCU_H diff --git a/cds/intrusive/michael_deque.h b/cds/intrusive/michael_deque.h new file mode 100644 index 00000000..8989034c --- /dev/null +++ b/cds/intrusive/michael_deque.h @@ -0,0 +1,993 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_MICHAEL_DEQUE_H +#define __CDS_INTRUSIVE_MICHAEL_DEQUE_H + +#include +#include +#include +#include +#include +#include + +#include + +namespace cds { namespace intrusive { + + //@cond + struct michael_deque_tag; + //@endcond + + /// MichaelDeque related definitions + /** @ingroup cds_intrusive_helper + */ + namespace michael_deque + { + /// Anchor contains left/right sibling items + /** + The anchor object is maintained by one CAS instruction. + */ + struct anchor + { + unsigned int idxLeft ; ///< Left sibling index; the most-significant bit contains left-stable flag + unsigned int idxRight ; ///< Right sibling index; the most-significant bit contains right-stable flag + +# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT + //@cond + anchor() CDS_NOEXCEPT_DEFAULTED = default; + anchor( anchor const& ) CDS_NOEXCEPT_DEFAULTED = default; + ~anchor() CDS_NOEXCEPT_DEFAULTED = default; + anchor& operator=(anchor const&) CDS_NOEXCEPT_DEFAULTED = default; +# if defined(CDS_MOVE_SEMANTICS_SUPPORT) && !defined(CDS_DISABLE_DEFAULT_MOVE_CTOR) + anchor( anchor&&) CDS_NOEXCEPT_DEFAULTED = default; + anchor& operator=(anchor&&) CDS_NOEXCEPT_DEFAULTED = default; +# endif + //@endcond +# else + /// Default ctor does not initialize left/right indices + anchor() CDS_NOEXCEPT + : idxLeft( 0 ) + , idxRight( 0 ) + { + static_check(); + } + + anchor( anchor const& a) CDS_NOEXCEPT + : idxLeft( a.idxLeft ) + , idxRight( a.idxRight ) + { + static_check(); + } +# endif + + /// Constructor sets \p left / \p right indices + anchor( unsigned int left, unsigned int right ) CDS_NOEXCEPT + : idxLeft( left ) + , idxRight( right ) + { + static_check(); + } + + /// Anchor equal operator + bool operator ==( anchor const& a) const CDS_NOEXCEPT + { + return idxLeft == a.idxLeft && idxRight == a.idxRight; + } + + /// Anchor non-equal operator + bool operator !=( anchor const& a) const CDS_NOEXCEPT + { + return !( *this == a ); + } + + private: + //@cond + static void static_check() + { + static_assert( sizeof(unsigned int) * 2 <= 8, "The index type must be no more than 32bit long" ); + static_assert( sizeof(anchor) <= 8, "The anchor type must be no more than 64bit long" ); + } + //@endcond + }; + + /// Michael's deque node + /** + Template parameters: + - GC - garbage collector + - Tag - a tag used to distinguish between different implementation + */ + template + struct node: public michael_list::node< GC, michael_deque_tag > + { + typedef GC gc ; ///< Garbage collector + typedef Tag tag ; ///< tag + + //@cond + typedef michael_list::node< gc, 
michael_deque_tag > mapper_node_type; + //@endcond + + typedef typename gc::template atomic_type< anchor > atomic_anchor ; ///< atomic reference to left/right node + + CDS_DATA_ALIGNMENT(8) atomic_anchor m_Links ; ///< Left/right sibling links + unsigned int m_nIndex; ///< Item index + + //@cond + node() + { + m_Links.store( anchor(0,0), CDS_ATOMIC::memory_order_release ); + } + + explicit node( anchor const& a ) + : m_Links() + , m_nIndex(0) + { + m_Links.store( a, CDS_ATOMIC::memory_order_release ); + } + //@endcond + }; + + //@cond + struct default_hook { + typedef cds::gc::default_gc gc; + typedef opt::none tag; + typedef unsigned int index_type; + }; + //@endcond + + //@cond + template < typename HookType, CDS_DECL_OPTIONS3> + struct hook + { + typedef typename opt::make_options< default_hook, CDS_OPTIONS3>::type options; + typedef typename options::gc gc; + typedef typename options::tag tag; + typedef typename options::index_type index_type; + + typedef node node_type; + typedef HookType hook_type; + }; + //@endcond + + + /// Base hook + /** + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - tag + - opt::index_type - integral index type + */ + template < CDS_DECL_OPTIONS3 > + struct base_hook: public hook< opt::base_hook_tag, CDS_OPTIONS3 > + {}; + + /// Member hook + /** + \p MemberOffset defines offset in bytes of \ref node member into your structure. + Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - tag + - opt::index_type - integral index type + */ + template < size_t MemberOffset, CDS_DECL_OPTIONS3 > + struct member_hook: public hook< opt::member_hook_tag, CDS_OPTIONS3 > + { + //@cond + static const size_t c_nMemberOffset = MemberOffset; + //@endcond + }; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - tag + - opt::index_type - integral index type + */ + template + struct traits_hook: public hook< opt::traits_hook_tag, CDS_OPTIONS3 > + { + //@cond + typedef NodeTraits node_traits; + //@endcond + }; + + /// Deque internal statistics. May be used for debugging or profiling + /** + Template argument \p Counter defines type of counter. + Default is cds::atomics::event_counter. + You may use other counter type like as cds::atomics::item_counter, + or even integral type, for example, \p int. + + The class extends intrusive::deque_stat interface for MichaelDeque. + */ + template + struct stat: public cds::intrusive::deque_stat + { + //@cond + typedef cds::intrusive::deque_stat base_class; + typedef typename base_class::counter_type counter_type; + //@endcond + + counter_type m_StabilizeFrontCount ; ///< stabilize left event count + counter_type m_StabilizeBackCount ; ///< stabilize right event count + + /// Register "stabilize left" event + void onStabilizeFront() { ++m_StabilizeFrontCount; } + + /// Register "stabilize right" event + void onStabilizeBack() { ++m_StabilizeBackCount; } + }; + + /// Dummy deque statistics - no counting is performed. 
Support interface like \ref michael_deque::stat + struct dummy_stat: public cds::intrusive::deque_dummy_stat + { + //@cond + void onStabilizeFront() {} + void onStabilizeBack() {} + //@endcond + }; + + //@cond + template < typename NodeType, opt::link_check_type LinkType> + struct link_checker + { + typedef NodeType node_type; + + static void is_empty( const node_type * pNode ) + { +# ifdef _DEBUG + anchor a = pNode->m_Links.load(CDS_ATOMIC::memory_order_relaxed); + assert( a.idxLeft == 0 && a.idxRight == 0 ); +# endif + } + }; + + template < typename NodeType> + struct link_checker + { + typedef NodeType node_type; + + static void is_empty( const node_type * /*pNode*/ ) + {} + }; + //@endcond + } // namespace michael_deque + + /// Michael's intrusive deque + /** @ingroup cds_intrusive_deque + Implementation of Michael's deque algorithm. + + \par Source: + [2003] Maged Michael "CAS-based Lock-free Algorithm for Shared Deque" + + Short description (from Michael's paper) + + The deque is represented as a doubly-linked list. Each node in the list contains two link pointers, + \p pRight and \p pLeft, and a data field. A shared variable, \p Anchor, holds the two anchor + pointers to the leftmost and rightmost nodes in the list, if any, and a three-value + status tag. Anchor must fit in a memory block that can be read and manipulated + using CAS or LL/SC, atomically. Initially both anchor pointers have null values + and the status tag holds the value stable, indicating an empty deque. + + The status tag serves to indicate if the deque is in an unstable state. When + a process finds the deque in an unstable state, it must first attempt to take it + to a stable state before attempting its own operation. + + The algorithm can use single-word CAS or LL/SC. + In \p libcds implementation of the algorithm the node contains two + 31bit link indices instead of pointers + one bit for status tag; + this trick allows use 64bit CAS to manipulate \p Anchor. Internal mapper + (based on MichaelHashSet intrusive container) + reflects link indices to item pointers. The maximum number of item in + the deque is limited by 2**31 that is practically unbounded. + + Template arguments: + - \p GC - garbage collector type: gc::HP or gc::PTB. Note that gc::HRC is not supported + - \p T - type to be stored in the queue, should be convertible to michael_deque::node + - \p Options - options + + Type of node: \ref michael_deque::node + + \p Options are: + - opt::hook - hook used. Possible values are: michael_deque::base_hook, michael_deque::member_hook, michael_deque::traits_hook. + If the option is not specified, michael_deque::base_hook<> is used. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. + - opt::disposer - the functor used for dispose removed items. Default is opt::v::empty_disposer. This option is used + in \ref pop_front and \ref pop_back functions. + - opt::link_checker - the type of node's link fields checking. Default is \ref opt::debug_check_link + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter (no item counting feature) + - opt::stat - the type to gather internal statistics. + Possible option value are: \ref michael_deque::stat, \ref michael_deque::dummy_stat, user-provided class that supports michael_deque::stat interface. + Default is \ref michael_deque::dummy_stat. + - opt::alignment - the alignment for internal deque data. 
Default is opt::cache_line_alignment + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - opt::allocator - allocator using for internal memory mapper based on MichaelHashSet. Default is CDS_DEFAULT_ALLOCATOR. + */ + template + class MichaelDeque + { + //@cond + struct default_options + { + typedef cds::backoff::empty back_off; + typedef michael_deque::base_hook<> hook; + typedef opt::v::empty_disposer disposer; + typedef atomicity::empty_item_counter item_counter; + typedef michael_deque::dummy_stat stat; + typedef opt::v::relaxed_ordering memory_model; + static const opt::link_check_type link_checker = opt::debug_check_link; + enum { alignment = opt::cache_line_alignment }; + typedef CDS_DEFAULT_ALLOCATOR allocator; + }; + //@endcond + + public: + //@cond + typedef typename opt::make_options< + typename cds::opt::find_type_traits< default_options, CDS_OPTIONS10 >::type + ,CDS_OPTIONS10 + >::type options; + //@endcond + + private: + //@cond + typedef typename std::conditional< + std::is_same >::value + ,michael_deque::stat<> + ,typename std::conditional< + std::is_same::value + ,michael_deque::dummy_stat + ,typename options::stat + >::type + >::type stat_type_; + //@endcond + + + public: + typedef T value_type ; ///< type of value stored in the deque + typedef typename options::hook hook ; ///< hook type + typedef typename hook::node_type node_type ; ///< node type + typedef typename options::disposer disposer ; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + typedef michael_deque::link_checker< node_type, options::link_checker > link_checker ; ///< link checker + + typedef GC gc ; ///< Garbage collector + typedef typename options::back_off back_off ; ///< back-off strategy + typedef typename options::item_counter item_counter ; ///< Item counting policy used + typedef stat_type_ stat ; ///< Internal statistics policy used + typedef typename options::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename options::allocator allocator_type ; ///< Allocator using for internal memory mapping + + typedef typename node_type::atomic_anchor atomic_anchor ; ///< Atomic anchor + + protected: + //@cond + class index_mapper + { + struct node_less_comparator + { + bool operator ()( value_type const & n1, value_type const& n2) const + { + return node_traits::to_node_ptr(n1)->m_nIndex < node_traits::to_node_ptr(n2)->m_nIndex; + } + bool operator ()( unsigned int i, value_type const& n2) const + { + return i < node_traits::to_node_ptr(n2)->m_nIndex; + } + bool operator ()( value_type const & n1, unsigned int i) const + { + return node_traits::to_node_ptr(n1)->m_nIndex < i; + } + }; + + struct internal_disposer + { + void operator()( value_type * p ) + { + assert( p != null_ptr()); + + MichaelDeque::clear_links( node_traits::to_node_ptr(p) ); + disposer()( p ); + } + }; + + struct mapper_node_traits + { + typedef typename node_type::mapper_node_type mapper_node_type; + + static mapper_node_type * to_node_ptr( value_type& v ) + { + return static_cast( node_traits::to_node_ptr(v) ); + } + + static mapper_node_type * to_node_ptr( value_type * v ) + { + return static_cast( node_traits::to_node_ptr(v) ); + } + + static mapper_node_type const * to_node_ptr( value_type const& v ) + { + return static_cast( node_traits::to_node_ptr(v) ); + } + + static mapper_node_type const * to_node_ptr( value_type const * v ) + { + return static_cast( node_traits::to_node_ptr(v) ); + } + + static value_type * to_value_ptr( mapper_node_type& n ) + { + return node_traits::to_value_ptr( static_cast(n)); + } + + static value_type * to_value_ptr( mapper_node_type * n ) + { + return node_traits::to_value_ptr( static_cast(n)); + } + + static const value_type * to_value_ptr( mapper_node_type const& n ) + { + return node_traits::to_value_ptr( static_cast(n)); + } + + static const value_type * to_value_ptr( mapper_node_type const * n ) + { + return node_traits::to_value_ptr( static_cast(n)); + } + }; + + typedef MichaelList< gc, value_type, + typename michael_list::make_traits< + opt::hook< michael_list::traits_hook< + mapper_node_traits + ,cds::opt::gc< gc > + ,cds::opt::tag > + > + ,opt::less< node_less_comparator > + ,opt::back_off< back_off > + ,opt::disposer< internal_disposer > + ,opt::memory_model< memory_model > + >::type + > mapper_ordered_list; + + struct mapper_hash { + size_t operator()( value_type const& v ) const + { + return cds::opt::v::hash()( node_traits::to_node_ptr(v)->m_nIndex ); + } + size_t operator()( unsigned int i ) const + { + return cds::opt::v::hash()(i); + } + }; + + typedef MichaelHashSet< gc, mapper_ordered_list, + typename michael_set::make_traits< + opt::hash< mapper_hash > + ,opt::allocator< allocator_type > + >::type + > mapper_type; + +# if !(defined(CDS_CXX11_LAMBDA_SUPPORT) && !((CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL) && _MSC_VER < 1700)) + struct at_functor { + node_type * pNode; + + at_functor() + : pNode( null_ptr()) + {} + + void operator()( value_type& v, unsigned int nIdx ) + { + pNode = node_traits::to_node_ptr(v); + assert( pNode->m_nIndex == nIdx ); + } + }; +# endif + + mapper_type m_set; + CDS_ATOMIC::atomic m_nLastIndex; + + public: + + index_mapper( size_t nEstimatedItemCount, size_t nLoadFactor ) + : m_set( nEstimatedItemCount, nLoadFactor ) + , m_nLastIndex(1) + {} + + unsigned int map( value_type& v ) + { + while ( true ) { + node_type * pNode = node_traits::to_node_ptr( v ); + pNode->m_nIndex = 
m_nLastIndex.fetch_add( 1, memory_model::memory_order_relaxed ); + if ( pNode->m_nIndex && m_set.insert( v )) + return pNode->m_nIndex; + } + } + + bool unmap( unsigned int nIdx ) + { + return m_set.erase( nIdx ); + } + + node_type * at( unsigned int nIdx ) + { +# if defined(CDS_CXX11_LAMBDA_SUPPORT) && !((CDS_COMPILER == CDS_COMPILER_MSVC ||CDS_COMPILER == CDS_COMPILER_INTEL) && _MSC_VER < 1700) + // MS VC++2010 bug: error C2955: 'cds::intrusive::node_traits' : use of class template requires template argument list + // see declaration of 'cds::intrusive::node_traits' + node_type * pNode = null_ptr(); + if ( m_set.find( nIdx, + [&pNode](value_type& v, unsigned int nIdx) { + pNode = node_traits::to_node_ptr(v); + assert( pNode->m_nIndex == nIdx ); + }) + ) + return pNode; +# else + at_functor f; + if ( m_set.find( nIdx, cds::ref(f) )) + return f.pNode; +# endif + return null_ptr(); + } + }; + //@endcond + public: + + /// Rebind template arguments + template + struct rebind { + typedef MichaelDeque< GC2, T2, CDS_OTHER_OPTIONS10> other ; ///< Rebinding result + }; + + protected: + typename cds::opt::details::alignment_setter< atomic_anchor, options::alignment >::type m_Anchor ; ///< Left/right heads + typename cds::opt::details::alignment_setter< index_mapper, options::alignment >::type m_Mapper ; ///< Memory mapper + + item_counter m_ItemCounter ; ///< item counter + stat m_Stat ; ///< Internal statistics + + //@cond + static const unsigned int c_nIndexMask = ((unsigned int)(0 - 1)) >> 1; + static const unsigned int c_nFlagMask = ((unsigned int)(1)) << (sizeof(unsigned int) * 8 - 1); + static const unsigned int c_nEmptyIndex = 0; + //@endcond + + private: + //@cond + typedef michael_deque::anchor CDS_TYPE_ALIGNMENT(8) anchor_type; + typedef intrusive::node_to_value node_to_value; + + static void clear_links( node_type * pNode ) + { + pNode->m_Links.store( anchor_type(), memory_model::memory_order_release ); + } + + enum anchor_status { + Stable, + RPush, + LPush + }; + + static anchor_status status( anchor_type const& a ) + { + if ( a.idxLeft & c_nFlagMask ) + return LPush; + if ( a.idxRight & c_nFlagMask ) + return RPush; + return Stable; + } + + static unsigned int index( unsigned int i ) + { + return i & c_nIndexMask; + } + + void stabilize( anchor_type& a ) + { + switch ( status(a)) { + case LPush: + stabilize_front(a); + break; + case RPush: + stabilize_back(a); + break; + default: + break; + } + } + + void stabilize_front( anchor_type& a ) + { + m_Stat.onStabilizeFront(); + + typename gc::template GuardArray<3> guards; + node_type * pLeft; + node_type * pRight; + unsigned int const idxLeft = index( a.idxLeft ); + unsigned int const idxRight = index( a.idxRight ); + + guards.assign( 0, node_traits::to_value_ptr( pLeft = m_Mapper.at( idxLeft )) ); + guards.assign( 1, node_traits::to_value_ptr( pRight = m_Mapper.at( idxRight )) ); + if ( m_Anchor.load( memory_model::memory_order_acquire ) != a ) + return; + + unsigned int idxPrev = index( pLeft->m_Links.load(memory_model::memory_order_relaxed ).idxRight ); + node_type * pPrev; + guards.assign( 2, node_traits::to_value_ptr( pPrev = m_Mapper.at( idxPrev )) ); + if ( m_Anchor.load( memory_model::memory_order_acquire ) != a ) + return; + + anchor_type prevLinks( pPrev->m_Links.load( memory_model::memory_order_acquire )); + if ( index( prevLinks.idxLeft ) != idxLeft ) { + if ( m_Anchor.load( memory_model::memory_order_acquire ) != a ) + return; + + if ( !pPrev->m_Links.compare_exchange_strong( prevLinks, anchor_type( idxLeft, 
prevLinks.idxRight ), memory_model::memory_order_release, memory_model::memory_order_relaxed )) + return; + } + + // clear RPush/LPush flags + m_Anchor.compare_exchange_weak( a, anchor_type(idxLeft, idxRight), memory_model::memory_order_release, memory_model::memory_order_relaxed ); + } + + void stabilize_back( anchor_type& a ) + { + m_Stat.onStabilizeBack(); + + typename gc::template GuardArray<3> guards; + node_type * pLeft; + node_type * pRight; + unsigned int const idxLeft = index( a.idxLeft ); + unsigned int const idxRight = index( a.idxRight ); + + guards.assign( 0, node_traits::to_value_ptr( pLeft = m_Mapper.at( idxLeft )) ); + guards.assign( 1, node_traits::to_value_ptr( pRight = m_Mapper.at( idxRight )) ); + if ( m_Anchor.load( memory_model::memory_order_acquire ) != a ) + return; + + unsigned int idxPrev = index( pRight->m_Links.load(memory_model::memory_order_relaxed ).idxLeft ); + node_type * pPrev; + guards.assign( 2, node_traits::to_value_ptr( pPrev = m_Mapper.at( idxPrev )) ); + if ( m_Anchor.load( memory_model::memory_order_acquire ) != a ) + return; + + anchor_type prevLinks( pPrev->m_Links.load( memory_model::memory_order_acquire )); + if ( index( prevLinks.idxRight ) != idxRight ) { + if ( m_Anchor.load( memory_model::memory_order_acquire ) != a ) + return; + + if ( !pPrev->m_Links.compare_exchange_strong( prevLinks, anchor_type( prevLinks.idxLeft, idxRight ), memory_model::memory_order_release, memory_model::memory_order_relaxed )) + return; + } + + // clear RPush/LPush flags + m_Anchor.compare_exchange_weak( a, anchor_type(idxLeft, idxRight), memory_model::memory_order_release, memory_model::memory_order_relaxed ); + } + + //@endcond + + protected: + //@cond + struct pop_result { + value_type * pPopped; + unsigned int nIdxPopped; + typename gc::template GuardArray<2> guards; + }; + + void dispose_result( pop_result& res ) + { + m_Mapper.unmap( res.nIdxPopped ); + } + + bool do_pop_back( pop_result& res ) + { + back_off bkoff; + anchor_type a; + + while ( true ) { + a = m_Anchor.load( memory_model::memory_order_acquire ); + + if ( a.idxRight == c_nEmptyIndex ) { + m_Stat.onPopEmpty(); + return false; + } + + if ( a.idxLeft == a.idxRight ) { + if ( m_Anchor.compare_exchange_weak( a, anchor_type( c_nEmptyIndex, c_nEmptyIndex ), memory_model::memory_order_release, memory_model::memory_order_relaxed )) + break; + bkoff(); + } + else if ( status( a ) == Stable ) { + unsigned int idxLeft = index( a.idxLeft ); + unsigned int idxRight = index( a.idxRight ); + node_type * pLeft; + res.guards.assign( 0, node_traits::to_value_ptr( pLeft = m_Mapper.at( idxLeft )) ); + node_type * pRight; + res.guards.assign( 1, node_traits::to_value_ptr( pRight = m_Mapper.at( idxRight )) ); + + if ( m_Anchor.load( memory_model::memory_order_acquire ) != a ) { + m_Stat.onPopBackContention(); + continue; + } + + unsigned int nPrev = pRight->m_Links.load( memory_model::memory_order_acquire ).idxLeft; + if ( m_Anchor.compare_exchange_weak( a, anchor_type( a.idxLeft, nPrev ), memory_model::memory_order_release, memory_model::memory_order_relaxed ) ) + break; + bkoff(); + m_Stat.onPopBackContention(); + } + else + stabilize( a ); + } + + res.nIdxPopped = a.idxRight; + res.pPopped = node_traits::to_value_ptr( m_Mapper.at( a.idxRight )); + + --m_ItemCounter; + m_Stat.onPopBack(); + + return true; + } + + bool do_pop_front( pop_result& res ) + { + back_off bkoff; + anchor_type a; + + while ( true ) { + a = m_Anchor.load( memory_model::memory_order_acquire ); + + if ( a.idxLeft == c_nEmptyIndex ) { + 
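+                    // empty deque: there is no leftmost item to pop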
m_Stat.onPopEmpty(); + return false; + } + + if ( a.idxLeft == a.idxRight ) { + if ( m_Anchor.compare_exchange_weak( a, anchor_type( c_nEmptyIndex, c_nEmptyIndex ), memory_model::memory_order_release, memory_model::memory_order_relaxed )) + break; + bkoff(); + } + else if ( status( a ) == Stable ) { + unsigned int idxLeft = index( a.idxLeft ); + unsigned int idxRight = index( a.idxRight ); + node_type * pLeft; + res.guards.assign( 0, node_traits::to_value_ptr( pLeft = m_Mapper.at( idxLeft )) ); + node_type * pRight; + res.guards.assign( 1, node_traits::to_value_ptr( pRight = m_Mapper.at( idxRight )) ); + + if ( m_Anchor.load( memory_model::memory_order_acquire ) != a ) { + m_Stat.onPopFrontContention(); + continue; + } + + unsigned int nPrev = pLeft->m_Links.load( memory_model::memory_order_acquire ).idxRight; + if ( m_Anchor.compare_exchange_weak( a, anchor_type( nPrev, a.idxRight ), memory_model::memory_order_release, memory_model::memory_order_relaxed ) ) + break; + bkoff(); + m_Stat.onPopFrontContention(); + } + else + stabilize( a ); + } + + res.nIdxPopped = a.idxLeft; + res.pPopped = node_traits::to_value_ptr( m_Mapper.at( a.idxLeft )); + + --m_ItemCounter; + m_Stat.onPopFront(); + + return true; + } + + //@endcond + + public: + /// Default constructor + /** + Initializes the deque object with up to 2**16 - 2 items + */ + MichaelDeque() + :m_Anchor() + ,m_Mapper( 4096, 4 ) + { + m_Anchor.store( anchor_type( c_nEmptyIndex, c_nEmptyIndex ), CDS_ATOMIC::memory_order_release ); + + // GC and node_type::gc must be the same + static_assert(( std::is_same::value ), "GC and node_type::gc must be the same"); + + // cds::gc::HRC is not allowed + static_assert(( !std::is_same::value ), "cds::gc::HRC is not allowed here"); + } + + /// Constructor + /** + Initializes the deque object with estimated item count \p nMaxItemCount. + \p nLoadFactor is a parameter of internal memory mapper based on MichaelHashSet; + see MichaelHashSet ctor for details + */ + MichaelDeque( unsigned int nMaxItemCount, unsigned int nLoadFactor = 4 ) + :m_Anchor() + ,m_Mapper( nMaxItemCount, nLoadFactor ) + { + m_Anchor.store( anchor_type( c_nEmptyIndex, c_nEmptyIndex ), CDS_ATOMIC::memory_order_release ); + + // GC and node_type::gc must be the same + static_assert(( std::is_same::value ), "GC and node_type::gc must be the same"); + + // cds::gc::HRC is not allowed + static_assert(( !std::is_same::value ), "cds::gc::HRC is not allowed here"); + } + + /// Destructor clears the deque + ~MichaelDeque() + { + clear(); + } + + public: + /// Push back (right) side + /** + Push new item \p val to right side of the deque. 
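+
+            Returns \p true if \p val has been linked into the deque; \p false is returned
+            only if the internal index mapper fails to allocate an index for \p val.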
+ */ + bool push_back( value_type& val ) + { + back_off bkoff; + + node_type * pNode = node_traits::to_node_ptr( val ); + link_checker::is_empty( pNode ); + + unsigned int nIdx = m_Mapper.map( val ); + if ( nIdx == c_nEmptyIndex ) + return false; + + while ( true ) { + anchor_type a = m_Anchor.load( memory_model::memory_order_acquire ); + if ( a.idxRight == c_nEmptyIndex ) { + if ( m_Anchor.compare_exchange_weak( a, anchor_type( nIdx, nIdx ), memory_model::memory_order_release, memory_model::memory_order_relaxed )) + break; + bkoff(); + m_Stat.onPushBackContention(); + } + else if ( status(a) == Stable ) { + pNode->m_Links.store( anchor_type( a.idxRight, c_nEmptyIndex ), memory_model::memory_order_release ); + anchor_type aNew( a.idxLeft, nIdx | c_nFlagMask ); + if ( m_Anchor.compare_exchange_weak( a, aNew, memory_model::memory_order_release, memory_model::memory_order_relaxed) ) { + stabilize_back( aNew ); + break; + } + bkoff(); + m_Stat.onPushBackContention(); + } + else + stabilize( a ); + } + + ++m_ItemCounter; + m_Stat.onPushBack(); + return true; + } + + /// Push front (left) side + /** + Push new item \p val to left side of the deque. + */ + bool push_front( value_type& val ) + { + back_off bkoff; + node_type * pNode = node_traits::to_node_ptr( val ); + link_checker::is_empty( pNode ); + + unsigned int nIdx = m_Mapper.map( val ); + if ( nIdx == c_nEmptyIndex ) + return false; + + while ( true ) { + anchor_type a = m_Anchor.load( memory_model::memory_order_acquire ); + if ( a.idxLeft == c_nEmptyIndex ) { + if ( m_Anchor.compare_exchange_weak( a, anchor_type( nIdx, nIdx ), memory_model::memory_order_release, memory_model::memory_order_relaxed )) + break; + bkoff(); + m_Stat.onPushFrontContention(); + } + else if ( status(a) == Stable ) { + pNode->m_Links.store( anchor_type( c_nEmptyIndex, a.idxLeft ), memory_model::memory_order_release ); + anchor_type aNew( nIdx | c_nFlagMask, a.idxRight ); + if ( m_Anchor.compare_exchange_weak( a, aNew, memory_model::memory_order_release, memory_model::memory_order_relaxed )) { + stabilize_front( aNew ); + break; + } + bkoff(); + m_Stat.onPushFrontContention(); + } + else + stabilize( a ); + } + + ++m_ItemCounter; + m_Stat.onPushFront(); + return true; + } + + /// Pop back + /** + Pops rightmost item from the deque. If the deque is empty then returns \p NULL. + + For popped object the disposer specified in \p Options template parameters is called. + */ + value_type * pop_back() + { + pop_result res; + if ( do_pop_back( res )) { + dispose_result( res ); + return res.pPopped; + } + + return null_ptr(); + } + + /// Pop front + /** + Pops leftmost item from the deque. If the deque is empty then returns \p NULL. + + For popped object the disposer specified in \p Options template parameters is called. + */ + value_type * pop_front() + { + pop_result res; + if ( do_pop_front( res )) { + dispose_result( res ); + return res.pPopped; + } + + return null_ptr(); + } + + /// Returns deque's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact does not mean that the deque + is empty. To check deque emptyness use \ref empty() method. 
+ */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Checks if the dequeue is empty + bool empty() const + { + anchor_type a = m_Anchor.load( memory_model::memory_order_relaxed ); + return a.idxLeft == c_nEmptyIndex && a.idxRight == c_nEmptyIndex; + } + + /// Clear the deque + /** + The function repeatedly calls \ref pop_back until it returns \p NULL. + The disposer defined in template \p Options is called for each item + that can be safely disposed. + */ + void clear() + { + while ( pop_back() != null_ptr() ); + } + + /// Returns reference to internal statistics + const stat& statistics() const + { + return m_Stat; + } + }; + + +}} // namespace cds::intrusive + + +#endif // #ifndef __CDS_INTRUSIVE_MICHAEL_DEQUE_H diff --git a/cds/intrusive/michael_list_base.h b/cds/intrusive/michael_list_base.h new file mode 100644 index 00000000..96fff3f3 --- /dev/null +++ b/cds/intrusive/michael_list_base.h @@ -0,0 +1,263 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_MICHAEL_LIST_BASE_H +#define __CDS_INTRUSIVE_MICHAEL_LIST_BASE_H + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// MichaelList ordered list related definitions + /** @ingroup cds_intrusive_helper + */ + namespace michael_list { + /// Michael's list node + /** + Template parameters: + - GC - garbage collector + - Tag - a tag used to distinguish between different implementation + */ + template + struct node + { + typedef GC gc ; ///< Garbage collector + typedef Tag tag ; ///< tag + + typedef cds::details::marked_ptr marked_ptr ; ///< marked pointer + typedef typename gc::template atomic_marked_ptr< marked_ptr> atomic_marked_ptr ; ///< atomic marked pointer specific for GC + + atomic_marked_ptr m_pNext ; ///< pointer to the next node in the container + + CDS_CONSTEXPR node() CDS_NOEXCEPT + : m_pNext( null_ptr() ) + {} + }; + + //@cond + template + struct node_cleaner { + void operator()( Node * p ) + { + typedef typename Node::marked_ptr marked_ptr; + p->m_pNext.store( marked_ptr(), MemoryModel::memory_order_release ); + } + }; + //@endcond + + //@cond + struct undefined_gc; + struct default_hook { + typedef undefined_gc gc; + typedef opt::none tag; + }; + //@endcond + + //@cond + template < typename HookType, CDS_DECL_OPTIONS2> + struct hook + { + typedef typename opt::make_options< default_hook, CDS_OPTIONS2>::type options; + typedef typename options::gc gc; + typedef typename options::tag tag; + typedef node node_type; + typedef HookType hook_type; + }; + //@endcond + + /// Base hook + /** + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - tag + */ + template < CDS_DECL_OPTIONS2 > + struct base_hook: public hook< opt::base_hook_tag, CDS_OPTIONS2 > + {}; + + /// Member hook + /** + \p MemberOffset defines offset in bytes of \ref node member into your structure. + Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - tag + */ + template < size_t MemberOffset, CDS_DECL_OPTIONS2 > + struct member_hook: public hook< opt::member_hook_tag, CDS_OPTIONS2 > + { + //@cond + static const size_t c_nMemberOffset = MemberOffset; + //@endcond + }; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - opt::gc - garbage collector used. 
+ - opt::tag - tag + */ + template + struct traits_hook: public hook< opt::traits_hook_tag, CDS_OPTIONS2 > + { + //@cond + typedef NodeTraits node_traits; + //@endcond + }; + + /// Check link + template + struct link_checker + { + //@cond + typedef Node node_type; + //@endcond + + /// Checks if the link field of node \p pNode is \p NULL + /** + An asserting is generated if \p pNode link field is not \p NULL + */ + static void is_empty( const node_type * pNode ) + { + assert( pNode->m_pNext.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr() ); + } + }; + + //@cond + template + struct link_checker_selector; + + template + struct link_checker_selector< GC, Node, opt::never_check_link > + { + typedef intrusive::opt::v::empty_link_checker type; + }; + + template + struct link_checker_selector< GC, Node, opt::debug_check_link > + { +# ifdef _DEBUG + typedef link_checker type; +# else + typedef intrusive::opt::v::empty_link_checker type; +# endif + }; + + template + struct link_checker_selector< GC, Node, opt::always_check_link > + { + typedef link_checker type; + }; + //@endcond + + /// Metafunction for selecting appropriate link checking policy + template < typename Node, opt::link_check_type LinkType > + struct get_link_checker + { + //@cond + typedef typename link_checker_selector< typename Node::gc, Node, LinkType>::type type; + //@endcond + }; + + /// Type traits for MichaelList class + struct type_traits + { + /// Hook used + /** + Possible values are: michael_list::base_hook, michael_list::member_hook, michael_list::traits_hook. + */ + typedef base_hook<> hook; + + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// specifies binary predicate used for key compare. + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// back-off strategy used + /** + If the option is not specified, the cds::backoff::Default is used. + */ + typedef cds::backoff::Default back_off; + + /// Disposer + /** + the functor used for dispose removed items. Default is opt::v::empty_disposer. + */ + typedef opt::v::empty_disposer disposer; + + /// Item counter + /** + The type for item counting feature. + Default is no item counter (\ref atomicity::empty_item_counter) + */ + typedef atomicity::empty_item_counter item_counter; + + /// Link fields checking feature + /** + Default is \ref opt::debug_check_link + */ + static const opt::link_check_type link_checker = opt::debug_check_link; + + /// C++ memory ordering model + /** + List of available memory ordering see opt::memory_model + */ + typedef opt::v::relaxed_ordering memory_model; + + /// RCU deadlock checking policy (only for \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList") + /** + List of available options see opt::rcu_check_deadlock + */ + typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; + }; + + /// Metafunction converting option list to traits + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + \p Options list see \ref MichaelList. 
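+
+        A brief usage sketch (illustrative; \p my_compare is a user-supplied comparator,
+        mirroring the examples given for \ref MichaelList):
+        \code
+        typedef typename cds::intrusive::michael_list::make_traits<
+            cds::intrusive::opt::hook<
+                cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::HP > >
+            >
+            ,cds::intrusive::opt::compare< my_compare >
+        >::type my_list_traits;
+        \endcode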
+ */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS9 >::type + ,CDS_OPTIONS9 + >::type type; + //typedef typename cds::opt::make_options< type_traits, CDS_OPTIONS9>::type type ; ///< Result of metafunction +# endif + }; + + } // namespace michael_list + + //@cond + // Forward declaration + template < class GC, typename T, class Traits = michael_list::type_traits > + class MichaelList; + //@endcond + + + /// Tag for selecting Michael list + //class michael_list_tag; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_MICHAEL_LIST_BASE_H diff --git a/cds/intrusive/michael_list_hp.h b/cds/intrusive/michael_list_hp.h new file mode 100644 index 00000000..f5065810 --- /dev/null +++ b/cds/intrusive/michael_list_hp.h @@ -0,0 +1,9 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_MICHAEL_LIST_HP_H +#define __CDS_INTRUSIVE_MICHAEL_LIST_HP_H + +#include +#include + +#endif // #ifndef __CDS_INTRUSIVE_MICHAEL_LIST_HP_H diff --git a/cds/intrusive/michael_list_hrc.h b/cds/intrusive/michael_list_hrc.h new file mode 100644 index 00000000..bded07f9 --- /dev/null +++ b/cds/intrusive/michael_list_hrc.h @@ -0,0 +1,76 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_MICHAEL_LIST_HRC_H +#define __CDS_INTRUSIVE_MICHAEL_LIST_HRC_H + +#include +#include + +namespace cds { namespace intrusive { namespace michael_list { + //@cond + // Specialization for HRC GC + template + struct node< gc::HRC, Tag>: public gc::HRC::container_node + { + typedef gc::HRC gc ; ///< Garbage collector + typedef Tag tag ; ///< tag + + typedef cds::details::marked_ptr marked_ptr ; ///< marked pointer + typedef typename gc::atomic_marked_ptr< marked_ptr> atomic_marked_ptr ; ///< atomic marked pointer + atomic_marked_ptr m_pNext ; ///< pointer to the next node in the stack + + node() + : m_pNext( null_ptr() ) + {} + + protected: + virtual void cleanUp( cds::gc::hrc::ThreadGC * pGC ) + { + assert( pGC != NULL ); + typename gc::GuardArray<2> aGuards( *pGC ); + + while ( true ) { + marked_ptr pNextMarked( aGuards.protect( 0, m_pNext )); + node * pNext = pNextMarked.ptr(); + if ( pNext && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) { + marked_ptr p = aGuards.protect( 1, pNext->m_pNext ); + m_pNext.compare_exchange_strong( pNextMarked, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ); + continue; + } + else { + break; + } + } + } + + virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent ) + { + if ( bConcurrent ) { + marked_ptr pNext = m_pNext.load(CDS_ATOMIC::memory_order_acquire); + do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ); + } + else { + m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed ); + } + } + }; + //@endcond + + + //@cond + template + struct link_checker_selector< gc::HRC, NODE, opt::never_check_link > + { + typedef link_checker type; + }; + + template + struct link_checker_selector< gc::HRC, NODE, opt::debug_check_link > + { + typedef link_checker type; + }; + //@endcond + +}}} // namespace cds::intrusive::michael_list + +#endif // #ifndef __CDS_INTRUSIVE_MICHAEL_LIST_HP_H diff --git a/cds/intrusive/michael_list_impl.h b/cds/intrusive/michael_list_impl.h new file mode 100644 index 00000000..bf6a1b9a --- /dev/null +++ b/cds/intrusive/michael_list_impl.h @@ -0,0 +1,1174 
@@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_MICHAEL_LIST_IMPL_H +#define __CDS_INTRUSIVE_MICHAEL_LIST_IMPL_H + +#include +#include + +namespace cds { namespace intrusive { + + /// Michael's lock-free ordered single-linked list + /** @ingroup cds_intrusive_list + \anchor cds_intrusive_MichaelList_hp + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Source: + - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" + + Template arguments: + - \p GC - Garbage collector used. Note the \p GC must be the same as the GC used for item type \p T (see michael_list::node). + - \p T - type to be stored in the list. The type must be based on michael_list::node (for michael_list::base_hook) + or it must have a member of type michael_list::node (for michael_list::member_hook). + - \p Traits - type traits. See michael_list::type_traits for explanation. + + It is possible to declare option-based list with cds::intrusive::michael_list::make_traits metafunction istead of \p Traits template + argument. + + Template argument list \p Options of cds::intrusive::michael_list::make_traits metafunction are: + - opt::hook - hook used. Possible values are: michael_list::base_hook, michael_list::member_hook, michael_list::traits_hook. + If the option is not specified, michael_list::base_hook<> and gc::HP is used. + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::Default is used. + - opt::disposer - the functor used for dispose removed items. Default is opt::v::empty_disposer. Due the nature + of GC schema the disposer may be called asynchronously. + - opt::link_checker - the type of node's link fields checking. Default is \ref opt::debug_check_link + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + + For example, the following traits-based declaration of gc::HP Michael's list + \code + #include + // Declare item stored in your list + struct item: public cds::intrusive::michael_list::node< cds::gc::HP > + { + int nKey; + // .... 
other data + }; + + // Declare comparator for the item + struct my_compare { + int operator()( item const& i1, item const& i2 ) const + { + return i1.nKey - i2.nKey; + } + }; + + // Declare type_traits + struct my_traits: public cds::intrusive::michael_list::type_traits + { + typedef cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::HP > > hook; + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::intrusive::MichaelList< cds::gc::HP, item, my_traits > traits_based_list; + \endcode + + is equivalent for the following option-based list + \code + #include + + // item struct and my_compare are the same + + // Declare option-based list + typedef cds::intrusive::MichaelList< cds::gc::HP, item, + typename cds::intrusive::michael_list::make_traits< + cds::intrusive::opt::hook< cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::HP > > > // hook option + ,cds::intrusive::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + \par Usage + There are different specializations of this template for each garbage collecting schema used. + You should select GC needed and include appropriate .h-file: + - for gc::HP: \code #include \endcode + - for gc::PTB: \code #include \endcode + - for gc::HRC: \code #include \endcode + - for \ref cds_urcu_gc "RCU type" - see \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList" + - for gc::nogc: \code #include \endcode + See \ref cds_intrusive_MichaelList_nogc "non-GC MichaelList" + + Then, you should incorporate michael_list::node into your struct \p T and provide + appropriate michael_list::type_traits::hook in your \p Traits template parameters. Usually, for \p Traits you + define a struct based on michael_list::type_traits. + + Example for gc::PTB and base hook: + \code + // Include GC-related Michael's list specialization + #include + + // Data stored in Michael's list + struct my_data: public cds::intrusive::michael_list::node< cds::gc::PTB > + { + // key field + std::string strKey; + + // other data + // ... 
+ }; + + // my_data comparing functor + struct my_data_cmp { + int operator()( const my_data& d1, const my_data& d2 ) + { + return d1.strKey.compare( d2.strKey ); + } + + int operator()( const my_data& d, const std::string& s ) + { + return d.strKey.compare(s); + } + + int operator()( const std::string& s, const my_data& d ) + { + return s.compare( d.strKey ); + } + }; + + + // Declare type_traits + struct my_traits: public cds::intrusive::michael_list::type_traits + { + typedef cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::PTB > > hook; + typedef my_data_cmp compare; + }; + + // Declare list type + typedef cds::intrusive::MichaelList< cds::gc::PTB, my_data, my_traits > traits_based_list; + \endcode + + Equivalent option-based code: + \code + // GC-related specialization + #include + + struct my_data { + // see above + }; + struct compare { + // see above + }; + + // Declare option-based list + typedef cds::intrusive::MichaelList< cds::gc::PTB + ,my_data + , typename cds::intrusive::michael_list::make_traits< + cds::intrusive::opt::hook< cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::PTB > > > + ,cds::intrusive::opt::compare< my_data_cmp > + >::type + > option_based_list; + + \endcode + */ + template < + class GC + ,typename T +#ifdef CDS_DOXYGEN_INVOKED + ,class Traits = michael_list::type_traits +#else + ,class Traits +#endif + > + class MichaelList + { + public: + typedef T value_type ; ///< type of value stored in the list + typedef Traits options ; ///< Traits template parameter + + typedef typename options::hook hook ; ///< hook type + typedef typename hook::node_type node_type ; ///< node type + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter. +# else + typedef typename opt::details::make_comparator< value_type, options >::type key_comparator; +# endif + + typedef typename options::disposer disposer ; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + typedef typename michael_list::get_link_checker< node_type, options::link_checker >::type link_checker ; ///< link checker + + typedef GC gc ; ///< Garbage collector + typedef typename options::back_off back_off ; ///< back-off strategy + typedef typename options::item_counter item_counter ; ///< Item counting policy used + typedef typename options::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + + typedef cds::gc::guarded_ptr< gc, value_type > guarded_ptr; ///< Guarded pointer + + //@cond + // Rebind options (split-list support) + template + struct rebind_options { + typedef MichaelList< + gc + , value_type + , typename cds::opt::make_options< options, CDS_OPTIONS7>::type + > type; + }; + //@endcond + + protected: + typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic node pointer + typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer + + typedef atomic_node_ptr auxiliary_head ; ///< Auxiliary head type (for split-list support) + + atomic_node_ptr m_pHead ; ///< Head pointer + item_counter m_ItemCounter ; ///< Item counter + + //@cond + /// Position pointer for item search + struct position { + atomic_node_ptr * pPrev ; ///< Previous node + node_type * pCur ; ///< Current node + node_type * pNext ; ///< Next node + + typename gc::template GuardArray<3> guards ; ///< Guards array + + enum { + guard_prev_item, + guard_current_item, + guard_next_item + }; + }; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_erase_functor { + void operator()( value_type const & item ) + {} + }; +# endif + + struct clean_disposer { + void operator()( value_type * p ) + { + michael_list::node_cleaner()( node_traits::to_node_ptr( p ) ); + disposer()( p ); + } + }; + + //@endcond + + protected: + //@cond + void retire_node( node_type * pNode ) + { + assert( pNode != null_ptr() ); + gc::template retire( node_traits::to_value_ptr( *pNode ) ); + } + + bool link_node( node_type * pNode, position& pos ) + { + assert( pNode != null_ptr() ); + link_checker::is_empty( pNode ); + + marked_node_ptr cur(pos.pCur); + pNode->m_pNext.store( cur, memory_model::memory_order_relaxed ); + return pos.pPrev->compare_exchange_strong( cur, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + } + + bool unlink_node( position& pos ) + { + assert( pos.pPrev != null_ptr() ); + assert( pos.pCur != null_ptr() ); + + // Mark the node (logical deleting) + marked_node_ptr next(pos.pNext, 0); + if ( pos.pCur->m_pNext.compare_exchange_strong( next, marked_node_ptr(pos.pNext, 1), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { + // physical deletion may be performed by search function if it detects that a node is logically deleted (marked) + // CAS may be successful here or in other thread that searching something + marked_node_ptr cur(pos.pCur); + if ( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + retire_node( pos.pCur ); + return true; + } + return false; + } + //@endcond + + protected: + //@cond + template + class iterator_type + { + friend class MichaelList; + + protected: + value_type * m_pNode; + typename gc::Guard m_Guard; + + void next() + { + if ( m_pNode ) { + typename gc::Guard g; + node_type * pCur = node_traits::to_node_ptr( *m_pNode ); + + marked_node_ptr pNext; + do { + pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed); + g.assign( node_traits::to_value_ptr( pNext.ptr() )); + } while ( pNext != pCur->m_pNext.load(memory_model::memory_order_acquire) ); + + if ( pNext.ptr() ) { + m_pNode = m_Guard.assign( g.template get() ); + } + else { + m_pNode = null_ptr(); + m_Guard.clear(); + } + } + } + + iterator_type( atomic_node_ptr const& pNode ) + { + for (;;) { + marked_node_ptr p = pNode.load(memory_model::memory_order_relaxed); + if ( p.ptr() ) { + m_pNode = 
m_Guard.assign( node_traits::to_value_ptr( p.ptr() ) ); + } + else { + m_pNode = null_ptr(); + m_Guard.clear(); + } + if ( p == pNode.load(memory_model::memory_order_acquire) ) + break; + } + } + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + : m_pNode( null_ptr() ) + {} + + iterator_type( iterator_type const& src ) + { + if ( src.m_pNode ) { + m_pNode = m_Guard.assign( src.m_pNode ); + } + else + m_pNode = null_ptr(); + } + + value_ptr operator ->() const + { + return m_pNode; + } + + value_ref operator *() const + { + assert( m_pNode != null_ptr() ); + return *m_pNode; + } + + /// Pre-increment + iterator_type& operator ++() + { + next(); + return *this; + } + + iterator_type& operator = (iterator_type const& src) + { + m_pNode = src.m_pNode; + m_Guard.assign( m_pNode ); + return *this; + } + + /* + /// Post-increment + void operator ++(int) + { + next(); + } + */ + + template + bool operator ==(iterator_type const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator_type const& i ) const + { + return m_pNode != i.m_pNode; + } + }; + //@endcond + + public: + /// Forward iterator + /** + The forward iterator for Michael's list has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. + For some GC (gc::HP, gc::HRC), a guard is limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if a limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary since it contains GC's guard that is thread-private GC data. + - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the list. + + Therefore, the use of iterators in concurrent environment is not good idea. Use the iterator on the concurrent container + for debug purpose only. + + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + */ + typedef iterator_type iterator; + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_pHead ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to NULL. + + The returned value can be used only to control reaching the end of the list. 
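        As a minimal sketch only (assuming the \p item struct and the \p traits_based_list typedef
        from the example above, and an illustrative list instance \p theList filled elsewhere),
        a debug-time traversal could look like:
        \code
        #include <iostream>

        traits_based_list theList;
        // ... items are inserted by other code ...
        for ( traits_based_list::iterator it = theList.begin(), itEnd = theList.end(); it != itEnd; ++it )
            std::cout << it->nKey << std::endl; // nKey is the key field of item
        \endcode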
+ For empty list begin() == end() + */ + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() + { + return const_iterator( m_pHead ); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + return const_iterator( m_pHead ); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() + { + return const_iterator(); + } + + public: + /// Default constructor initializes empty list + MichaelList() + : m_pHead(null_ptr()) + { + static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); + } + + /// Destroys the list object + ~MichaelList() + { + clear(); + } + + /// Inserts new node + /** + The function inserts \p val in the list if the list does not contain + an item with key equal to \p val. + + Returns \p true if \p val is linked into the list, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert_at( m_pHead, val ); + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split new item creating into two part: + - create item with key only + - insert new item into the list + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this list's item by concurrent threads. + The user-defined functor is called only if the inserting is success and may be passed by reference + using boost::ref. + */ + template + bool insert( value_type& val, Func f ) + { + return insert_at( m_pHead, val, f ); + } + + /// Ensures that the \p item exists in the list + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the list, then \p val is inserted into the list. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refers to the same thing. + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref or cds::ref. + + Returns std::pair where \p first is \p true if operation is successfull, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair ensure( value_type& val, Func func ) + { + return ensure_at( m_pHead, val, func ); + } + + /// Unlinks the item \p val from the list + /** + The function searches the item \p val in the list and unlink it from the list + if it is found and it is equal to \p val. 
+ + Difference between \ref erase and \p unlink functions: \p erase finds a key + and deletes the item found. \p unlink finds an item by key and deletes it + only if \p val is an item of that list, i.e. the pointer to item found + is equal to &val . + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + return unlink_at( m_pHead, val ); + } + + /// Deletes the item from the list + /** \anchor cds_intrusive_MichaelList_hp_erase_val + The function searches an item with key equal to \p val in the list, + unlinks it from the list, and returns \p true. + If the item with the key equal to \p val is not found the function return \p false. + */ + template + bool erase( Q const& val ) + { + return erase_at( m_pHead, val, key_comparator() ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelList_hp_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& val, Less pred ) + { + return erase_at( m_pHead, val, cds::opt::details::make_comparator_from_less()); + } + + /// Deletes the item from the list + /** \anchor cds_intrusive_MichaelList_hp_erase_func + The function searches an item with key equal to \p val in the list, + call \p func functor with item found, unlinks it from the list, and returns \p true. + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + The functor may be passed by reference using boost:ref + + If the item with the key equal to \p val is not found the function return \p false. + */ + template + bool erase( Q const& val, Func func ) + { + return erase_at( m_pHead, val, key_comparator(), func ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelList_hp_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& val, Less pred, Func f ) + { + return erase_at( m_pHead, val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Extracts the item from the list with specified \p key + /** \anchor cds_intrusive_MichaelList_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the list, and returns it in \p dest parameter. + If the item with key equal to \p key is not found the function returns \p false. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + The \ref disposer specified in \p Traits class template parameter is called automatically + by garbage collector \p GC when returned \ref guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::intrusive::MichaelList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp; + theList.extract( gp, 5 ); + // Deal with gp + // ... 
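        // (illustrative only) while gp is not empty it can be dereferenced like a raw pointer to foo,
        // e.g. if ( !gp.empty() ) process_foo( *gp ); where process_foo is a hypothetical user function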
+ + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + bool extract( guarded_ptr& dest, Q const& key ) + { + return extract_at( m_pHead, dest.guard(), key, key_comparator() ); + } + + /// Extracts the item using compare functor \p pred + /** + The function is an analog of \ref cds_intrusive_MichaelList_hp_extract "extract(guarded_ptr&, Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool extract_with( guarded_ptr& dest, Q const& key, Less pred ) + { + return extract_at( m_pHead, dest.guard(), key, cds::opt::details::make_comparator_from_less() ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_MichaelList_hp_find_func + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the function is only guarantee + that \p item cannot be disposed during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return find_at( m_pHead, val, key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelList_hp_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q& val, Less pred, Func f ) + { + return find_at( m_pHead, val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_MichaelList_hp_find_cfunc + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You may pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the function is only guarantee + that \p item cannot be disposed during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function returns \p true if \p val is found, \p false otherwise. 
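        A minimal usage sketch (reusing the \p item struct, \p my_compare and the \p traits_based_list
        typedef from the example above; \p theList, \p copy_key, \p key and \p f are illustrative names):
        \code
        struct copy_key {
            int nFound;
            void operator()( item& i, item const& /*key*/ ) { nFound = i.nKey; }
        };

        traits_based_list theList;
        // ... the list is filled elsewhere ...
        item key;
        key.nKey = 5;
        copy_key f;
        if ( theList.find( key, cds::ref(f) )) {
            // f.nFound == 5 here; with a real payload the functor would copy it out instead
        }
        \endcode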
+ */ + template + bool find( Q const& val, Func f ) + { + return find_at( m_pHead, val, key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelList_hp_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& val, Less pred, Func f ) + { + return find_at( m_pHead, val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_MichaelList_hp_find_val + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise + */ + template + bool find( Q const & val ) + { + return find_at( m_pHead, val, key_comparator() ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelList_hp_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& val, Less pred ) + { + return find_at( m_pHead, val, cds::opt::details::make_comparator_from_less() ); + } + + /// Finds the key \p val and return the item found + /** \anchor cds_intrusive_MichaelList_hp_get + The function searches the item with key equal to \p val + and assigns the item found to guarded pointer \p ptr. + The function returns \p true if \p val is found, and \p false otherwise. + If \p val is not found the \p ptr parameter is not changed. + + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC automatically when returned \ref guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::intrusive::MichaelList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp; + if ( theList.get( gp, 5 )) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool get( guarded_ptr& ptr, Q const& val ) + { + return get_at( m_pHead, ptr.guard(), val, key_comparator() ); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_intrusive_MichaelList_hp_get "get( guarded_ptr& ptr, Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool get_with( guarded_ptr& ptr, Q const& val, Less pred ) + { + return get_at( m_pHead, ptr.guard(), val, cds::opt::details::make_comparator_from_less() ); + } + + /// Clears the list + /** + The function unlink all items from the list. 
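        For example (in the absence of concurrent insertions):
        \code
        theList.clear();
        assert( theList.empty() );
        \endcode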
+ */ + void clear() + { + typename gc::Guard guard; + marked_node_ptr head; + while ( true ) { + head = m_pHead.load(memory_model::memory_order_relaxed); + if ( head.ptr() ) + guard.assign( node_traits::to_value_ptr( *head.ptr() )); + if ( m_pHead.load(memory_model::memory_order_acquire) == head ) { + if ( head.ptr() == null_ptr() ) + break; + value_type& val = *node_traits::to_value_ptr( *head.ptr() ); + unlink( val ); + } + } + } + + /// Checks if the list is empty + bool empty() const + { + return m_pHead.load(memory_model::memory_order_relaxed).all() == null_ptr(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + protected: + //@cond + // split-list support + bool insert_aux_node( node_type * pNode ) + { + return insert_aux_node( m_pHead, pNode ); + } + + // split-list support + bool insert_aux_node( atomic_node_ptr& refHead, node_type * pNode ) + { + assert( pNode != null_ptr() ); + + // Hack: convert node_type to value_type. + // In principle, auxiliary node can be non-reducible to value_type + // We assume that comparator can correctly distinguish aux and regular node. + return insert_at( refHead, *node_traits::to_value_ptr( pNode ) ); + } + + bool insert_at( atomic_node_ptr& refHead, value_type& val ) + { + node_type * pNode = node_traits::to_node_ptr( val ); + link_checker::is_empty( pNode ); + position pos; + + while ( true ) { + if ( search( refHead, val, pos, key_comparator() ) ) + return false; + + if ( link_node( pNode, pos ) ) { + ++m_ItemCounter; + return true; + } + + // clear next field + pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); + } + } + + template + bool insert_at( atomic_node_ptr& refHead, value_type& val, Func f ) + { + node_type * pNode = node_traits::to_node_ptr( val ); + link_checker::is_empty( pNode ); + position pos; + + while ( true ) { + if ( search( refHead, val, pos, key_comparator() ) ) + return false; + + typename gc::Guard guard; + guard.assign( &val ); + if ( link_node( pNode, pos ) ) { + cds::unref(f)( val ); + ++m_ItemCounter; + return true; + } + + // clear next field + pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); + } + } + + template + std::pair ensure_at( atomic_node_ptr& refHead, value_type& val, Func func ) + { + position pos; + + node_type * pNode = node_traits::to_node_ptr( val ); + while ( true ) { + if ( search( refHead, val, pos, key_comparator() ) ) { + if ( pos.pCur->m_pNext.load(memory_model::memory_order_acquire).bits() ) { + back_off()(); + continue ; // the node found is marked as deleted + } + assert( key_comparator()( val, *node_traits::to_value_ptr( *pos.pCur ) ) == 0 ); + + unref(func)( false, *node_traits::to_value_ptr( *pos.pCur ) , val ); + return std::make_pair( true, false ); + } + else { + typename gc::Guard guard; + guard.assign( &val ); + if ( link_node( pNode, pos ) ) { + ++m_ItemCounter; + unref(func)( true, val, val ); + return std::make_pair( true, true ); + } + // clear next field + pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); + } + } + } + + bool unlink_at( atomic_node_ptr& refHead, value_type& val ) + { + position pos; + + back_off bkoff; + while ( search( refHead, 
val, pos, key_comparator() ) ) { + if ( node_traits::to_value_ptr( *pos.pCur ) == &val ) { + if ( unlink_node( pos ) ) { + --m_ItemCounter; + return true; + } + else + bkoff(); + } + else + break; + } + return false; + } + + template + bool erase_at( atomic_node_ptr& refHead, const Q& val, Compare cmp, Func f, position& pos ) + { + back_off bkoff; + while ( search( refHead, val, pos, cmp )) { + if ( unlink_node( pos ) ) { + cds::unref(f)( *node_traits::to_value_ptr( *pos.pCur ) ); + --m_ItemCounter; + return true; + } + else + bkoff(); + } + return false; + } + + template + bool erase_at( atomic_node_ptr& refHead, const Q& val, Compare cmp, Func f ) + { + position pos; + return erase_at( refHead, val, cmp, f, pos ); + } + + template + bool erase_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) + { + position pos; +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_at( refHead, val, cmp, [](value_type const&){}, pos ); +# else + return erase_at( refHead, val, cmp, empty_erase_functor(), pos ); +# endif + } + + template + bool extract_at( atomic_node_ptr& refHead, typename gc::Guard& dest, Q const& val, Compare cmp ) + { + position pos; + back_off bkoff; + while ( search( refHead, val, pos, cmp )) { + if ( unlink_node( pos ) ) { + dest.assign( pos.guards.template get( position::guard_current_item ) ); + --m_ItemCounter; + return true; + } + else + bkoff(); + } + return false; + } + + template + bool find_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) + { + position pos; + return search( refHead, val, pos, cmp ); + } + + template + bool find_at( atomic_node_ptr& refHead, Q& val, Compare cmp, Func f ) + { + position pos; + if ( search( refHead, val, pos, cmp )) { + cds::unref(f)( *node_traits::to_value_ptr( *pos.pCur ), val ); + return true; + } + return false; + } + + template + bool get_at( atomic_node_ptr& refHead, typename gc::Guard& guard, Q const& val, Compare cmp ) + { + position pos; + if ( search( refHead, val, pos, cmp )) { + guard.assign( pos.guards.template get( position::guard_current_item )); + return true; + } + return false; + } + + //@endcond + + protected: + + //@cond + template + bool search( atomic_node_ptr& refHead, const Q& val, position& pos, Compare cmp ) + { + atomic_node_ptr * pPrev; + marked_node_ptr pNext; + marked_node_ptr pCur; + + back_off bkoff; + +try_again: + pPrev = &refHead; + pNext = null_ptr(); + + pCur = pPrev->load(memory_model::memory_order_relaxed); + pos.guards.assign( position::guard_current_item, node_traits::to_value_ptr( pCur.ptr() ) ); + if ( pPrev->load(memory_model::memory_order_acquire) != pCur.ptr() ) + goto try_again; + + while ( true ) { + if ( pCur.ptr() == null_ptr() ) { + pos.pPrev = pPrev; + pos.pCur = pCur.ptr(); + pos.pNext = pNext.ptr(); + return false; + } + + pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed); + pos.guards.assign( position::guard_next_item, node_traits::to_value_ptr( pNext.ptr() )); + if ( pCur->m_pNext.load(memory_model::memory_order_relaxed).all() != pNext.all() ) { + bkoff(); + goto try_again; + } + + if ( pPrev->load(memory_model::memory_order_relaxed).all() != pCur.ptr() ) { + bkoff(); + goto try_again; + } + + // pNext contains deletion mark for pCur + if ( pNext.bits() == 1 ) { + // pCur marked i.e. logically deleted. 
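(the deletion mark is the least significant bit of m_pNext, set by unlink_node).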
Help the erase/unlink function to unlink pCur node + marked_node_ptr cur( pCur.ptr()); + if ( pPrev->compare_exchange_strong( cur, marked_node_ptr( pNext.ptr() ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { + retire_node( pCur.ptr() ); + } + else { + bkoff(); + goto try_again; + } + } + else { + assert( pCur.ptr() != null_ptr() ); + int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val ); + if ( nCmp >= 0 ) { + pos.pPrev = pPrev; + pos.pCur = pCur.ptr(); + pos.pNext = pNext.ptr(); + return nCmp == 0; + } + pPrev = &( pCur->m_pNext ); + pos.guards.assign( position::guard_prev_item, node_traits::to_value_ptr( pCur.ptr() ) ); + } + pCur = pNext; + pos.guards.assign( position::guard_current_item, node_traits::to_value_ptr( pCur.ptr() )); + } + } + //@endcond + }; +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_MICHAEL_LIST_IMPL_H diff --git a/cds/intrusive/michael_list_nogc.h b/cds/intrusive/michael_list_nogc.h new file mode 100644 index 00000000..107c04b5 --- /dev/null +++ b/cds/intrusive/michael_list_nogc.h @@ -0,0 +1,644 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_MICHAEL_LIST_NOGC_H +#define __CDS_INTRUSIVE_MICHAEL_LIST_NOGC_H + +#include +#include + +namespace cds { namespace intrusive { + + namespace michael_list { + /// Michael list node + /** + Template parameters: + - Tag - a tag used to distinguish between different implementation + */ + template + struct node + { + typedef gc::nogc gc ; ///< Garbage collector + typedef Tag tag ; ///< tag + + typedef CDS_ATOMIC::atomic< node * > atomic_ptr ; ///< atomic marked pointer + + atomic_ptr m_pNext ; ///< pointer to the next node in the container + + node() + : m_pNext( null_ptr()) + {} + }; + + } // namespace michael_list + + /// Michael's lock-free ordered single-linked list (template specialization for gc::nogc) + /** @ingroup cds_intrusive_list + \anchor cds_intrusive_MichaelList_nogc + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of item. + + See \ref cds_intrusive_MichaelList_hp "MichaelList" for description of template parameters. + + The interface of the specialization is a slightly different. + */ + template < typename T, class Traits > + class MichaelList + { + public: + typedef T value_type ; ///< type of value stored in the queue + typedef Traits options ; ///< Traits template parameter + + typedef typename options::hook hook ; ///< hook type + typedef typename hook::node_type node_type ; ///< node type + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter. +# else + typedef typename opt::details::make_comparator< value_type, options >::type key_comparator; +# endif + + typedef typename options::disposer disposer ; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + typedef typename michael_list::get_link_checker< node_type, options::link_checker >::type link_checker ; ///< link checker + + typedef gc::nogc gc ; ///< Garbage collector + typedef typename options::back_off back_off ; ///< back-off strategy + typedef typename options::item_counter item_counter ; ///< Item counting policy used + typedef typename options::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + + //@cond + // Rebind options (split-list support) + template + struct rebind_options { + typedef MichaelList< + gc + , value_type + , typename cds::opt::make_options< options, CDS_OPTIONS7>::type + > type; + }; + //@endcond + + protected: + typedef typename node_type::atomic_ptr atomic_node_ptr ; ///< Atomic node pointer + typedef atomic_node_ptr auxiliary_head ; ///< Auxiliary head type (for split-list support) + + atomic_node_ptr m_pHead ; ///< Head pointer + item_counter m_ItemCounter ; ///< Item counter + + //@cond + /// Position pointer for item search + struct position { + atomic_node_ptr * pPrev ; ///< Previous node + node_type * pCur ; ///< Current node + node_type * pNext ; ///< Next node + }; + //@endcond + + protected: + //@cond + void clear_links( node_type * pNode ) + { + pNode->m_pNext.store( null_ptr(), memory_model::memory_order_release ); + } + + template + void dispose_node( node_type * pNode, Disposer disp ) + { + clear_links( pNode ); + cds::unref(disp)( node_traits::to_value_ptr( *pNode )); + } + + template + void dispose_value( value_type& val, Disposer disp ) + { + dispose_node( node_traits::to_node_ptr( val ), disp ); + } + + bool link_node( node_type * pNode, position& pos ) + { + assert( pNode != null_ptr() ); + link_checker::is_empty( pNode ); + + pNode->m_pNext.store( pos.pCur, memory_model::memory_order_relaxed ); + return pos.pPrev->compare_exchange_strong( pos.pCur, pNode, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + } + //@endcond + + protected: + //@cond + template + class iterator_type + { + friend class MichaelList; + value_type * m_pNode; + + void next() + { + if ( m_pNode ) { + node_type * pNode = node_traits::to_node_ptr( *m_pNode )->m_pNext.load(memory_model::memory_order_acquire); + if ( pNode ) + m_pNode = node_traits::to_value_ptr( *pNode ); + else + m_pNode = null_ptr(); + } + } + + protected: + explicit iterator_type( node_type * pNode) + { + if ( pNode ) + m_pNode = node_traits::to_value_ptr( *pNode ); + else + m_pNode = null_ptr(); + } + explicit iterator_type( atomic_node_ptr const& refNode) + { + node_type * pNode = refNode.load(memory_model::memory_order_relaxed); + if ( pNode ) + m_pNode = node_traits::to_value_ptr( *pNode ); + else + m_pNode = null_ptr(); + } + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + : m_pNode(null_ptr()) + {} + + iterator_type( const iterator_type& src ) + : m_pNode( src.m_pNode ) + {} + + value_ptr operator ->() const + { + return m_pNode; + } + + value_ref operator *() const + { + assert( m_pNode != null_ptr() ); + return *m_pNode; + } + + /// Pre-increment + iterator_type& operator ++() + { + next(); + return *this; + } + + /// Post-increment + iterator_type operator ++(int) + { + iterator_type i(*this); + next(); + return i; + } + + iterator_type& operator = (const iterator_type& src) + { + m_pNode = src.m_pNode; + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator_type const& i ) const + { + return m_pNode != i.m_pNode; + } + }; + //@endcond + + public: + /// Forward iterator + typedef iterator_type iterator; + /// Const forward iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + 
iterator begin() + { + return iterator(m_pHead.load(memory_model::memory_order_relaxed) ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to NULL. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + return const_iterator(m_pHead.load(memory_model::memory_order_relaxed) ); + } + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() + { + return const_iterator(m_pHead.load(memory_model::memory_order_relaxed) ); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator(); + } + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator(); + } + + public: + /// Default constructor initializes empty list + MichaelList() + : m_pHead( null_ptr()) + { + static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); + } + + /// Destroys the list objects + ~MichaelList() + { + clear(); + } + + /// Inserts new node + /** + The function inserts \p val in the list if the list does not contain + an item with key equal to \p val. + + Returns \p true if \p val is linked into the list, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert_at( m_pHead, val ); + } + + /// Ensures that the \p item exists in the list + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the list, then \p val is inserted into the list. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + struct functor { + void operator()( bool bNew, value_type& item, value_type& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You can pass \p func argument by value or by reference using boost::ref or cds::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + */ + + template + std::pair ensure( value_type& val, Func func ) + { + return ensure_at( m_pHead, val, func ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_MichaelList_nogc_find_func + The function searches the item with key equal to \p val + and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. 
+ + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. + The function \p find does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return find_at( m_pHead, val, key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelList_nogc_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q& val, Less pred, Func f ) + { + return find_at( m_pHead, val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_MichaelList_nogc_find_cfunc + The function searches the item with key equal to \p val + and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. + The function \p find does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return find_at( m_pHead, val, key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelList_nogc_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& val, Less pred, Func f ) + { + return find_at( m_pHead, val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_MichaelList_nogc_find_val + The function searches the item with key equal to \p val + and returns pointer to value found or \p NULL. + */ + template + value_type * find( Q const & val ) + { + return find_at( m_pHead, val, key_comparator() ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelList_nogc_find_val "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + value_type * find_with( Q const& val, Less pred ) + { + return find_at( m_pHead, val, cds::opt::details::make_comparator_from_less()); + } + + /// Clears the list + /** + The function unlink all items from the list. + + For each unlinked item the item disposer \p disp is called after unlinking. 
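        A sketch (assuming a value type \p item based on michael_list::node< cds::gc::nogc >, a list
        instance \p theList, items allocated with \p new, and no concurrent access to the list):
        \code
        struct delete_disposer {
            void operator()( item * p ) { delete p; } // reclaim the unlinked item
        };

        theList.clear( delete_disposer() );
        \endcode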
+ */ + template + void clear( Disposer disp ) + { + node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed); + do {} while ( !m_pHead.compare_exchange_weak( pHead, null_ptr(), memory_model::memory_order_relaxed )); + + while ( pHead ) { + node_type * p = pHead->m_pNext.load(memory_model::memory_order_relaxed); + dispose_node( pHead, disp ); + pHead = p; + --m_ItemCounter; + } + } + + /// Clears the list using default disposer + /** + The function clears the list using default (provided in class template) disposer functor. + */ + void clear() + { + clear( disposer() ); + } + + /// Checks if the list is empty + bool empty() const + { + return m_pHead.load(memory_model::memory_order_relaxed) == null_ptr(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomics::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + protected: + //@cond + // split-list support + bool insert_aux_node( node_type * pNode ) + { + return insert_aux_node( m_pHead, pNode ); + } + + // split-list support + bool insert_aux_node( atomic_node_ptr& refHead, node_type * pNode ) + { + assert( pNode != null_ptr() ); + + // Hack: convert node_type to value_type. + // In principle, auxiliary node can be non-reducible to value_type + // We assume that comparator can correctly distinguish aux and regular node. + return insert_at( refHead, *node_traits::to_value_ptr( pNode ) ); + } + + bool insert_at( atomic_node_ptr& refHead, value_type& val ) + { + link_checker::is_empty( node_traits::to_node_ptr( val ) ); + position pos; + + while ( true ) { + if ( search( refHead, val, key_comparator(), pos ) ) + return false; + + if ( link_node( node_traits::to_node_ptr( val ), pos ) ) { + ++m_ItemCounter; + return true; + } + } + } + + iterator insert_at_( atomic_node_ptr& refHead, value_type& val ) + { + if ( insert_at( refHead, val )) + return iterator( node_traits::to_node_ptr( val )); + return end(); + } + + template + std::pair ensure_at_( atomic_node_ptr& refHead, value_type& val, Func func ) + { + position pos; + + while ( true ) { + if ( search( refHead, val, key_comparator(), pos ) ) { + assert( key_comparator()( val, *node_traits::to_value_ptr( *pos.pCur ) ) == 0 ); + + unref(func)( false, *node_traits::to_value_ptr( *pos.pCur ) , val ); + return std::make_pair( iterator( pos.pCur ), false ); + } + else { + link_checker::is_empty( node_traits::to_node_ptr( val ) ); + + if ( link_node( node_traits::to_node_ptr( val ), pos ) ) { + ++m_ItemCounter; + unref(func)( true, val , val ); + return std::make_pair( iterator( node_traits::to_node_ptr( val )), true ); + } + } + } + } + + template + std::pair ensure_at( atomic_node_ptr& refHead, value_type& val, Func func ) + { + std::pair ret = ensure_at_( refHead, val, func ); + return std::make_pair( ret.first != end(), ret.second ); + } + + template + bool find_at( atomic_node_ptr& refHead, Q& val, Compare cmp, Func f ) + { + position pos; + + if ( search( refHead, val, cmp, pos ) ) { + assert( pos.pCur != null_ptr() ); + unref(f)( *node_traits::to_value_ptr( *pos.pCur ), val ); + return true; + } + return false; + } + + template + value_type * find_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) + { + iterator it = find_at_( refHead, val, cmp ); + if ( it != end() ) + return &*it; + 
return null_ptr(); + } + + template + iterator find_at_( atomic_node_ptr& refHead, Q const& val, Compare cmp ) + { + position pos; + + if ( search( refHead, val, cmp, pos ) ) { + assert( pos.pCur != null_ptr() ); + return iterator( pos.pCur ); + } + return end(); + } + + //@endcond + + protected: + + //@cond + template + bool search( atomic_node_ptr& refHead, const Q& val, Compare cmp, position& pos ) + { + atomic_node_ptr * pPrev; + node_type * pNext; + node_type * pCur; + + back_off bkoff; + + try_again: + pPrev = &refHead; + pCur = pPrev->load(memory_model::memory_order_acquire); + pNext = null_ptr(); + + while ( true ) { + if ( !pCur ) { + pos.pPrev = pPrev; + pos.pCur = pCur; + pos.pNext = pNext; + return false; + } + + pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed); + if ( pCur->m_pNext.load(memory_model::memory_order_acquire) != pNext ) { + bkoff(); + goto try_again; + } + + if ( pPrev->load(memory_model::memory_order_acquire) != pCur ) { + bkoff(); + goto try_again; + } + + assert( pCur != null_ptr() ); + int nCmp = cmp( *node_traits::to_value_ptr( *pCur ), val ); + if ( nCmp >= 0 ) { + pos.pPrev = pPrev; + pos.pCur = pCur; + pos.pNext = pNext; + return nCmp == 0; + } + pPrev = &( pCur->m_pNext ); + pCur = pNext; + } + } + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_MICHAEL_LIST_NOGC_H diff --git a/cds/intrusive/michael_list_ptb.h b/cds/intrusive/michael_list_ptb.h new file mode 100644 index 00000000..4fd613b2 --- /dev/null +++ b/cds/intrusive/michael_list_ptb.h @@ -0,0 +1,9 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_MICHAEL_LIST_PTB_H +#define __CDS_INTRUSIVE_MICHAEL_LIST_PTB_H + +#include +#include + +#endif // #ifndef __CDS_INTRUSIVE_MICHAEL_LIST_PTB_H diff --git a/cds/intrusive/michael_list_rcu.h b/cds/intrusive/michael_list_rcu.h new file mode 100644 index 00000000..61258440 --- /dev/null +++ b/cds/intrusive/michael_list_rcu.h @@ -0,0 +1,1080 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_MICHAEL_LIST_RCU_H +#define __CDS_INTRUSIVE_MICHAEL_LIST_RCU_H + +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// Michael's lock-free ordered single-linked list (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_intrusive_list + \anchor cds_intrusive_MichaelList_rcu + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p T - type to be stored in the list; the type \p T should be based on (or has a member of type) + cds::intrusive::micheal_list::node + - \p Traits - type traits. See michael_list::type_traits for explanation. + + It is possible to declare option-based list with \p %cds::intrusive::michael_list::make_traits metafunction istead of \p Traits template + argument. Template argument list \p Options of cds::intrusive::michael_list::make_traits metafunction are: + - opt::hook - hook used. Possible values are: michael_list::base_hook, michael_list::member_hook, michael_list::traits_hook. + If the option is not specified, michael_list::base_hook<> is used. + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::disposer - the functor used for dispose removed items. 
Default is opt::v::empty_disposer + - opt::rcu_check_deadlock - a deadlock checking policy. Default is opt::v::rcu_throw_deadlock + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + + \par Usage + Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" you should include: + \code + #include + #include + + // Now, you can declare Michael's list for type Foo and default traits: + typedef cds::intrusive::MichaelList >, Foo > rcu_michael_list; + \endcode + */ + template < typename RCU, typename T, class Traits > + class MichaelList, T, Traits> + { + public: + typedef T value_type ; ///< type of value stored in the queue + typedef Traits options ; ///< Traits template parameter + + typedef typename options::hook hook ; ///< hook type + typedef typename hook::node_type node_type ; ///< node type + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter. +# else + typedef typename opt::details::make_comparator< value_type, options >::type key_comparator; +# endif + + typedef typename options::disposer disposer ; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + typedef typename michael_list::get_link_checker< node_type, options::link_checker >::type link_checker ; ///< link checker + + typedef cds::urcu::gc gc ; ///< RCU schema + typedef typename options::back_off back_off ; ///< back-off strategy + typedef typename options::item_counter item_counter; ///< Item counting policy used + typedef typename options::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename options::rcu_check_deadlock rcu_check_deadlock ; ///< Deadlock checking policy + + typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock + static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = true; ///< Group of \p extract_xxx functions require external locking + + //@cond + // Rebind options (split-list support) + template + struct rebind_options { + typedef MichaelList< + gc + , value_type + , typename cds::opt::make_options< options, CDS_OPTIONS7>::type + > type; + }; + //@endcond + + protected: + typedef typename node_type::marked_ptr marked_node_ptr ; ///< Marked node pointer + typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic node pointer + typedef atomic_node_ptr auxiliary_head ; ///< Auxiliary head type (for split-list support) + + atomic_node_ptr m_pHead ; ///< Head pointer + item_counter m_ItemCounter ; ///< Item counter + + //@cond + /// Position pointer for item search + struct position { + atomic_node_ptr * pPrev ; ///< Previous node + node_type * pCur ; ///< Current node + node_type * pNext ; ///< Next node + }; + + typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_erase_functor { + void operator()( value_type const & item ) + {} + }; + + struct get_functor { + value_type * pFound; + + get_functor() + : pFound(null_ptr()) + {} + + template + void operator()( value_type& item, Q& val ) + { + pFound = &item; + } + }; + +# endif + + static void clear_links( node_type * pNode ) + { + pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); + } + + struct clear_and_dispose { + void operator()( value_type * p ) + { + assert( p != null_ptr() ); + clear_links( node_traits::to_node_ptr(p)); + disposer()( p ); + } + }; + //@endcond + + public: + typedef cds::urcu::exempt_ptr< gc, value_type, value_type, clear_and_dispose, void > exempt_ptr ; ///< pointer to extracted node + + protected: + //@cond + + static void dispose_node( node_type * pNode ) + { + assert( pNode ); + assert( !gc::is_locked() ); + + gc::template retire_ptr( node_traits::to_value_ptr( *pNode ) ); + } + + bool link_node( node_type * pNode, position& pos ) + { + assert( pNode != null_ptr() ); + link_checker::is_empty( pNode ); + + marked_node_ptr p( pos.pCur ); + pNode->m_pNext.store( p, memory_model::memory_order_relaxed ); + return pos.pPrev->compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + } + + bool unlink_node( position& pos ) + { + // Mark the node (logical deleting) + marked_node_ptr next(pos.pNext, 0); + if ( pos.pCur->m_pNext.compare_exchange_strong( next, marked_node_ptr(pos.pNext, 1), memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) { + marked_node_ptr cur(pos.pCur); + if ( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + return true; + next |= 1; + CDS_VERIFY( pos.pCur->m_pNext.compare_exchange_strong( next, next ^ 1, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } + return false; + } + //@endcond + + protected: + //@cond + template + class iterator_type + { + friend class MichaelList; + value_type * m_pNode; + + void next() + { + if ( m_pNode ) { + node_type * p = node_traits::to_node_ptr( *m_pNode )->m_pNext.load(memory_model::memory_order_relaxed).ptr(); + m_pNode = p ? 
node_traits::to_value_ptr(p) : null_ptr(); + } + } + + protected: + explicit iterator_type( node_type * pNode) + { + if ( pNode ) + m_pNode = node_traits::to_value_ptr( *pNode ); + else + m_pNode = null_ptr(); + } + explicit iterator_type( atomic_node_ptr const& refNode) + { + node_type * pNode = refNode.load(memory_model::memory_order_relaxed).ptr(); + m_pNode = pNode ? node_traits::to_value_ptr( *pNode ) : null_ptr(); + } + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + : m_pNode(null_ptr()) + {} + + iterator_type( const iterator_type& src ) + : m_pNode( src.m_pNode ) + {} + + value_ptr operator ->() const + { + return m_pNode; + } + + value_ref operator *() const + { + assert( m_pNode != null_ptr() ); + return *m_pNode; + } + + /// Pre-increment + iterator_type& operator ++() + { + next(); + return *this; + } + + /// Post-increment + iterator_type operator ++(int) + { + iterator_type i(*this); + next(); + return i; + } + + iterator_type& operator = (const iterator_type& src) + { + m_pNode = src.m_pNode; + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator_type const& i ) const + { + return m_pNode != i.m_pNode; + } + }; + //@endcond + + public: + /// Forward iterator + typedef iterator_type iterator; + /// Const forward iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_pHead ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to \p NULL. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + return const_iterator(m_pHead ); + } + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() + { + return const_iterator(m_pHead ); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator(); + } + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() + { + return const_iterator(); + } + + public: + /// Default constructor initializes empty list + MichaelList() + : m_pHead( null_ptr()) + { + static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); + } + + /// Destroy list + ~MichaelList() + { + clear(); + } + + /// Inserts new node + /** + The function inserts \p val in the list if the list does not contain + an item with key equal to \p val. + + The function makes RCU lock internally. + + Returns \p true if \p val is linked into the list, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert_at( m_pHead, val ); + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. 
+ + The function allows to split new item creating into two part: + - create item with key only + - insert new item into the list + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this list's item by concurrent threads. + The user-defined functor is called only if the inserting is success and may be passed by reference + using boost::ref. + + The function makes RCU lock internally. + */ + template + bool insert( value_type& val, Func f ) + { + return insert_at( m_pHead, val, f ); + } + + /// Ensures that the \p item exists in the list + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the list, then \p val is inserted into the list. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + struct functor { + void operator()( bool bNew, value_type& item, value_type& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You can pass \p func argument by value or by reference using boost::ref or cds::ref. + + Returns std::pair where \p first is true if operation is successfull, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + + The function makes RCU lock internally. + */ + + template + std::pair ensure( value_type& val, Func func ) + { + return ensure_at( m_pHead, val, func ); + } + + /// Unlinks the item \p val from the list + /** + The function searches the item \p val in the list and unlink it from the list + if it is found and it is equal to \p val. + + Difference between \ref erase and \p unlink functions: \p erase finds a key + and deletes the item found. \p unlink finds an item by key and deletes it + only if \p val is an item of that list, i.e. the pointer to item found + is equal to &val . + + The function returns \p true if success and \p false otherwise. + + RCU \p synchronize method can be called. + Note that depending on RCU type used the \ref disposer call can be deferred. + + The function can throw cds::urcu::rcu_deadlock exception if deadlock is encountered and + deadlock checking policy is opt::v::rcu_throw_deadlock. + */ + bool unlink( value_type& val ) + { + return unlink_at( m_pHead, val ); + } + + /// Deletes the item from the list + /** \anchor cds_intrusive_MichaelList_rcu_erase_val + The function searches an item with key equal to \p val in the list, + unlinks it from the list, and returns \p true. + If the item with the key equal to \p val is not found the function return \p false. + + RCU \p synchronize method can be called. + Note that depending on RCU type used the \ref disposer call can be deferred. + + The function can throw \ref cds_urcu_rcu_deadlock "cds::urcu::rcu_deadlock" exception if a deadlock is detected and + the deadlock checking policy is opt::v::rcu_throw_deadlock. 
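+
+        A minimal usage sketch (an illustration only; it assumes the \p rcu_michael_list typedef
+        from the class-level example above and a key comparator able to compare \p Foo with \p int):
+        \code
+        rcu_michael_list theList;
+        // ...
+        if ( theList.erase( 10 )) {
+            // the item with key 10 has been unlinked from the list;
+            // depending on the RCU type used the disposer call may be deferred
+        }
+        \endcode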
+ */ + template + bool erase( Q const& val ) + { + return erase_at( m_pHead, val, key_comparator() ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelList_rcu_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& val, Less pred ) + { + return erase_at( m_pHead, val, cds::opt::details::make_comparator_from_less() ); + } + + /// Deletes the item from the list + /** \anchor cds_intrusive_MichaelList_rcu_erase_func + The function searches an item with key equal to \p val in the list, + call \p func functor with item found, unlinks it from the list, and returns \p true. + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + The functor may be passed by reference using boost:ref + + If the item with the key equal to \p val is not found the function return \p false. + + RCU \p synchronize method can be called. + Note that depending on RCU type used the \ref disposer call can be deferred. + + The function can throw \ref cds_urcu_rcu_deadlock "cds::urcu::rcu_deadlock" exception if a deadlock is detected and + the deadlock checking policy is opt::v::rcu_throw_deadlock. + */ + template + bool erase( Q const& val, Func func ) + { + return erase_at( m_pHead, val, key_comparator(), func ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelList_rcu_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& val, Less pred, Func func ) + { + return erase_at( m_pHead, val, cds::opt::details::make_comparator_from_less(), func ); + } + + /// Extracts an item from the list + /** + @anchor cds_intrusive_MichaelList_rcu_extract + The function searches an item with key equal to \p val in the list, + unlinks it from the list, and returns pointer to an item found in \p dest parameter. + If the item with the key equal to \p val is not found the function returns \p false, + \p dest is empty. + + @note The function does NOT call RCU read-side lock or synchronization, + and does NOT dispose the item found. It just excludes the item from the list + and returns a pointer to item found. + You should lock RCU before calling this function, and you should manually release + \p dest exempt pointer outside the RCU lock before reusing the pointer. + + \code + #include + #include + + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::intrusive::MichaelList< rcu, Foo > rcu_michael_list; + + rcu_michael_list theList; + // ... + + rcu_michael_list::exempt_ptr p1; + { + // first, we should lock RCU + rcu::scoped_lock sl; + + // Now, you can apply extract function + // Note that you must not delete the item found inside the RCU lock + if ( theList.extract( p1, 10 )) { + // do something with p1 + ... + } + } + + // We may safely release p1 here + // release() passes the pointer to RCU reclamation cycle: + // it invokes RCU retire_ptr function with the disposer you provided for the list. 
+ p1.release(); + \endcode + */ + template + bool extract( exempt_ptr& dest, Q const& val ) + { + dest = extract_at( m_pHead, val, key_comparator() ); + return !dest.empty(); + } + + /// Extracts an item from the list using \p pred predicate for searching + /** + This function is the analog for \ref cds_intrusive_MichaelList_rcu_extract "extract(exempt_ptr&, Q const&)". + + The \p pred is a predicate used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as \ref key_comparator. + */ + template + bool extract_with( exempt_ptr& dest, Q const& val, Less pred ) + { + dest = extract_at( m_pHead, val, cds::opt::details::make_comparator_from_less() ); + return !dest.empty(); + } + + /// Find the key \p val + /** \anchor cds_intrusive_MichaelList_rcu_find_func + The function searches the item with key equal to \p val + and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. + The function \p find does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function makes RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) const + { + return find_at( const_cast(m_pHead), val, key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelList_rcu_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q& val, Less pred, Func f ) const + { + return find_at( const_cast( m_pHead ), val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Find the key \p val + /** \anchor cds_intrusive_MichaelList_rcu_find_cfunc + The function searches the item with key equal to \p val + and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. + The function \p find does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function makes RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) const + { + return find_at( const_cast( m_pHead ), val, key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelList_rcu_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. 
+ \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& val, Less pred, Func f ) const + { + return find_at( const_cast( m_pHead ), val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Find the key \p val + /** \anchor cds_intrusive_MichaelList_rcu_find_val + The function searches the item with key equal to \p val + and returns \p true if \p val found or \p false otherwise. + */ + template + bool find( Q const& val ) const + { + return find_at( const_cast( m_pHead ), val, key_comparator() ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelList_rcu_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& val, Less pred ) const + { + return find_at( const_cast( m_pHead ), val, cds::opt::details::make_comparator_from_less() ); + } + + /// Finds the key \p val and return the item found + /** \anchor cds_intrusive_MichaelList_rcu_get + The function searches the item with key equal to \p val and returns the pointer to item found. + If \p val is not found it returns \p NULL. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::intrusive::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > ord_list; + ord_list theList; + // ... + { + // Lock RCU + ord_list::rcu_lock lock; + + foo * pVal = theList.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + // Unlock RCU by rcu_lock destructor + // pVal can be retired by disposer at any time after RCU has been unlocked + } + \endcode + */ + template + value_type * get( Q const& val ) const + { + return get_at( const_cast( m_pHead ), val, key_comparator()); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_intrusive_MichaelList_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + value_type * get_with( Q const& val, Less pred ) const + { + return get_at( const_cast( m_pHead ), val, cds::opt::details::make_comparator_from_less()); + } + + /// Clears the list using default disposer + /** + The function clears the list using default (provided in class template) disposer functor. + + RCU \p synchronize method can be called. + Note that depending on RCU type used the \ref disposer invocation can be deferred. + + The function can throw cds::urcu::rcu_deadlock exception if an deadlock is encountered and + deadlock checking policy is opt::v::rcu_throw_deadlock. 
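+
+        A minimal usage sketch (an illustration only; it assumes the \p rcu_michael_list typedef
+        from the class-level example above):
+        \code
+        rcu_michael_list theList;
+        // ...
+        // clear() may call RCU synchronize internally, so it should not be called
+        // while the current thread holds an RCU read-side lock
+        theList.clear();
+        \endcode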
+ */ + void clear() + { + if( !empty() ) { + check_deadlock_policy::check(); + + marked_node_ptr pHead; + for (;;) { + { + rcu_lock l; + pHead = m_pHead.load(memory_model::memory_order_consume); + if ( !pHead.ptr() ) + break; + marked_node_ptr pNext( pHead->m_pNext.load(memory_model::memory_order_relaxed) ); + if ( !pHead->m_pNext.compare_exchange_weak( pNext, pNext | 1, memory_model::memory_order_acquire, memory_model::memory_order_relaxed )) + continue; + if ( !m_pHead.compare_exchange_weak( pHead, marked_node_ptr(pNext.ptr()), memory_model::memory_order_release, memory_model::memory_order_relaxed )) + continue; + } + + --m_ItemCounter; + dispose_node( pHead.ptr() ); + } + } + } + + /// Check if the list is empty + bool empty() const + { + return m_pHead.load(memory_model::memory_order_relaxed).all() == null_ptr(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomics::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + protected: + //@cond + // split-list support + bool insert_aux_node( node_type * pNode ) + { + return insert_aux_node( m_pHead, pNode ); + } + + // split-list support + bool insert_aux_node( atomic_node_ptr& refHead, node_type * pNode ) + { + assert( pNode != null_ptr() ); + + // Hack: convert node_type to value_type. + // In principle, auxiliary node can be non-reducible to value_type + // We assume that comparator can correctly distinguish between aux and regular node. + return insert_at( refHead, *node_traits::to_value_ptr( pNode ) ); + } + + bool insert_at( atomic_node_ptr& refHead, value_type& val, bool bLock = true ) + { + link_checker::is_empty( node_traits::to_node_ptr( val ) ); + position pos; + + rcu_lock l( bLock ); + while ( true ) { + if ( search( refHead, val, pos, key_comparator() ) ) + return false; + + if ( link_node( node_traits::to_node_ptr( val ), pos ) ) { + ++m_ItemCounter; + return true; + } + + // clear next field + node_traits::to_node_ptr( val )->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); + } + } + + template + bool insert_at( atomic_node_ptr& refHead, value_type& val, Func f ) + { + link_checker::is_empty( node_traits::to_node_ptr( val ) ); + position pos; + + rcu_lock l; + while ( true ) { + if ( search( refHead, val, pos, key_comparator() ) ) + return false; + + if ( link_node( node_traits::to_node_ptr( val ), pos ) ) { + cds::unref(f)( val ); + ++m_ItemCounter; + return true; + } + + // clear next field + node_traits::to_node_ptr( val )->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); + } + } + + iterator insert_at_( atomic_node_ptr& refHead, value_type& val, bool bLock = true ) + { + rcu_lock l( bLock ); + if ( insert_at( refHead, val, false )) + return iterator( node_traits::to_node_ptr( val )); + return end(); + } + + template + std::pair ensure_at_( atomic_node_ptr& refHead, value_type& val, Func func, bool bLock = true ) + { + position pos; + + rcu_lock l( bLock ); + while ( true ) { + if ( search( refHead, val, pos, key_comparator() ) ) { + assert( key_comparator()( val, *node_traits::to_value_ptr( *pos.pCur ) ) == 0 ); + + unref(func)( false, *node_traits::to_value_ptr( *pos.pCur ), val ); + return std::make_pair( iterator( pos.pCur ), false ); + } + else { + link_checker::is_empty( 
node_traits::to_node_ptr( val ) ); + + if ( link_node( node_traits::to_node_ptr( val ), pos ) ) { + ++m_ItemCounter; + unref(func)( true, val , val ); + return std::make_pair( iterator( node_traits::to_node_ptr( val )), true ); + } + + // clear the next field + node_traits::to_node_ptr( val )->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); + } + } + } + + template + std::pair ensure_at( atomic_node_ptr& refHead, value_type& val, Func func, bool bLock = true ) + { + rcu_lock l( bLock ); + std::pair ret = ensure_at_( refHead, val, func, false ); + return std::make_pair( ret.first != end(), ret.second ); + } + + bool unlink_at( atomic_node_ptr& refHead, value_type& val ) + { + position pos; + back_off bkoff; + check_deadlock_policy::check(); + + for (;;) { + { + rcu_lock l; + if ( !search( refHead, val, pos, key_comparator() ) || node_traits::to_value_ptr( *pos.pCur ) != &val ) + return false; + if ( !unlink_node( pos )) { + bkoff(); + continue; + } + } + + --m_ItemCounter; + dispose_node( pos.pCur ); + return true; + } + } + + template + bool erase_at( atomic_node_ptr& refHead, Q const& val, Compare cmp, Func f, position& pos ) + { + back_off bkoff; + check_deadlock_policy::check(); + + for (;;) { + { + rcu_lock l; + if ( !search( refHead, val, pos, cmp ) ) + return false; + if ( !unlink_node( pos )) { + bkoff(); + continue; + } + } + + cds::unref(f)( *node_traits::to_value_ptr( *pos.pCur ) ); + --m_ItemCounter; + dispose_node( pos.pCur ); + return true; + } + } + + template + bool erase_at( atomic_node_ptr& refHead, Q const& val, Compare cmp, Func f ) + { + position pos; + return erase_at( refHead, val, cmp, f, pos ); + } + + template + bool erase_at( atomic_node_ptr& refHead, const Q& val, Compare cmp ) + { + position pos; +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_at( refHead, val, cmp, [](value_type const&){}, pos ); +# else + return erase_at( refHead, val, cmp, empty_erase_functor(), pos ); +# endif + } + + template + value_type * extract_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) + { + position pos; + back_off bkoff; + assert( gc::is_locked() ) ; // RCU must be locked!!! + + for (;;) { + if ( !search( refHead, val, pos, cmp ) ) + return null_ptr(); + if ( !unlink_node( pos )) { + bkoff(); + continue; + } + + --m_ItemCounter; + return node_traits::to_value_ptr( pos.pCur ); + } + } + + template + bool find_at( atomic_node_ptr& refHead, Q& val, Compare cmp, Func f, bool bLock = true ) const + { + position pos; + + rcu_lock l( bLock ); + if ( search( refHead, val, pos, cmp ) ) { + assert( pos.pCur != null_ptr() ); + unref(f)( *node_traits::to_value_ptr( *pos.pCur ), val ); + return true; + } + return false; + } + + template + bool find_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) const + { + rcu_lock l; + return find_at_( refHead, val, cmp ) != end(); + } + + template + value_type * get_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) const + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + value_type * pFound = null_ptr(); + return find_at( refHead, val, cmp, + [&pFound](value_type& found, Q const& ) { pFound = &found; } ) + ? pFound : null_ptr(); +# else + get_functor gf; + return find_at( refHead, val, cmp, cds::ref(gf) ) + ? 
gf.pFound : null_ptr(); +# endif + } + + template + const_iterator find_at_( atomic_node_ptr& refHead, Q const& val, Compare cmp ) const + { + assert( gc::is_locked() ); + position pos; + + if ( search( refHead, val, pos, cmp ) ) { + assert( pos.pCur != null_ptr() ); + return const_iterator( pos.pCur ); + } + return end(); + } + + //@endcond + + protected: + + //@cond + template + bool search( atomic_node_ptr& refHead, const Q& val, position& pos, Compare cmp ) const + { + // RCU lock should be locked!!! + assert( gc::is_locked() ); + + atomic_node_ptr * pPrev; + marked_node_ptr pNext; + marked_node_ptr pCur; + + back_off bkoff; + + try_again: + pPrev = &refHead; + pCur = pPrev->load(memory_model::memory_order_acquire); + pNext = null_ptr(); + + while ( true ) { + if ( !pCur.ptr() ) { + pos.pPrev = pPrev; + pos.pCur = pCur.ptr(); + pos.pNext = pNext.ptr(); + return false; + } + + pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed); + + if ( pPrev->load(memory_model::memory_order_acquire) != pCur + || pNext != pCur->m_pNext.load(memory_model::memory_order_acquire) + || pNext.bits() != 0 ) // pNext contains deletion mark for pCur + { + // if pCur is marked as deleted (pNext.bits() != 0) + // we wait for physical removal. + // Helping technique is not suitable for RCU since it requires + // that the RCU should be in unlocking state. + bkoff(); + goto try_again; + } + + assert( pCur.ptr() != null_ptr() ); + int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val ); + if ( nCmp >= 0 ) { + pos.pPrev = pPrev; + pos.pCur = pCur.ptr(); + pos.pNext = pNext.ptr(); + return nCmp == 0; + } + pPrev = &( pCur->m_pNext ); + pCur = pNext; + } + } + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_MICHAEL_LIST_NOGC_H diff --git a/cds/intrusive/michael_set.h b/cds/intrusive/michael_set.h new file mode 100644 index 00000000..27f5435c --- /dev/null +++ b/cds/intrusive/michael_set.h @@ -0,0 +1,811 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_MICHAEL_SET_H +#define __CDS_INTRUSIVE_MICHAEL_SET_H + +#include +#include + +namespace cds { namespace intrusive { + + /// Michael's hash set + /** @ingroup cds_intrusive_map + \anchor cds_intrusive_MichaelHashSet_hp + + Source: + - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" + + Michael's hash table algorithm is based on lock-free ordered list and it is very simple. + The main structure is an array \p T of size \p M. Each element in \p T is basically a pointer + to a hash bucket, implemented as a singly linked list. The array of buckets cannot be dynamically expanded. + However, each bucket may contain unbounded number of items. + + Template parameters are: + - \p GC - Garbage collector used. Note the \p GC must be the same as the GC used for \p OrderedList + - \p OrderedList - ordered list implementation used as bucket for hash set, for example, MichaelList, LazyList. + The intrusive ordered list implementation specifies the type \p T stored in the hash-set, the reclamation + schema \p GC used by hash-set, the comparison functor for the type \p T and other features specific for + the ordered list. + - \p Traits - type traits. See michael_set::type_traits for explanation. + Instead of defining \p Traits struct you can use option-based syntax with michael_set::make_traits metafunction. + + There are several specializations of \p %MichaelHashSet for each GC. 
You should include: + - for \ref cds_intrusive_MichaelHashSet_rcu "RCU type" + - for \ref cds_intrusive_MichaelHashSet_nogc for persistent set + - for other GC (gc::HP, gc::HRC, gc::PTB) + + Hash functor + + Some member functions of Michael's hash set accept the key parameter of type \p Q which differs from \p value_type. + It is expected that type \p Q contains full key of \p value_type, and for equal keys of type \p Q and \p value_type + the hash values of these keys must be equal too. + The hash functor Traits::hash should accept parameters of both type: + \code + // Our node type + struct Foo { + std::string key_ ; // key field + // ... other fields + }; + + // Hash functor + struct fooHash { + size_t operator()( const std::string& s ) const + { + return std::hash( s ); + } + + size_t operator()( const Foo& f ) const + { + return (*this)( f.key_ ); + } + }; + \endcode + + + How to use + + First, you should define ordered list type to use in your hash set: + \code + // For gc::HP-based MichaelList implementation + #include + + // cds::intrusive::MichaelHashSet declaration + #include + + // Type of hash-set items + struct Foo: public cds::intrusive::michael_list::node< cds::gc::HP > + { + std::string key_ ; // key field + unsigned val_ ; // value field + // ... other value fields + }; + + // Declare comparator for the item + struct FooCmp + { + int operator()( const Foo& f1, const Foo& f2 ) const + { + return f1.key_.compare( f2.key_ ); + } + }; + + // Declare bucket type for Michael's hash set + // The bucket type is any ordered list type like MichaelList, LazyList + typedef cds::intrusive::MichaelList< cds::gc::HP, Foo, + typename cds::intrusive::michael_list::make_traits< + // hook option + cds::intrusive::opt::hook< cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::HP > > > + // item comparator option + ,cds::opt::compare< FooCmp > + >::type + > Foo_bucket; + \endcode + + Second, you should declare Michael's hash set container: + \code + + // Declare hash functor + // Note, the hash functor accepts parameter type Foo and std::string + struct FooHash { + size_t operator()( const Foo& f ) const + { + return cds::opt::v::hash()( f.key_ ); + } + size_t operator()( const std::string& f ) const + { + return cds::opt::v::hash()( f ); + } + }; + + // Michael's set typedef + typedef cds::intrusive::MichaelHashSet< + cds::gc::HP + ,Foo_bucket + ,typename cds::intrusive::michael_set::make_traits< + cds::opt::hash< FooHash > + >::type + > Foo_set; + \endcode + + Now, you can use \p Foo_set in your application. + + Like other intrusive containers, you may build several containers on single item structure: + \code + #include + #include + #include + + struct tag_key1_idx; + struct tag_key2_idx; + + // Your two-key data + // The first key is maintained by gc::HP, second key is maintained by gc::PTB garbage collectors + struct Foo + : public cds::intrusive::michael_list::node< cds::gc::HP, tag_key1_idx > + , public cds::intrusive::michael_list::node< cds::gc::PTB, tag_key2_idx > + { + std::string key1_ ; // first key field + unsigned int key2_ ; // second key field + + // ... 
value fields and fields for controlling item's lifetime + }; + + // Declare comparators for the item + struct Key1Cmp + { + int operator()( const Foo& f1, const Foo& f2 ) const { return f1.key1_.compare( f2.key1_ ) ; } + }; + struct Key2Less + { + bool operator()( const Foo& f1, const Foo& f2 ) const { return f1.key2_ < f2.key1_ ; } + }; + + // Declare bucket type for Michael's hash set indexed by key1_ field and maintained by gc::HP + typedef cds::intrusive::MichaelList< cds::gc::HP, Foo, + typename cds::intrusive::michael_list::make_traits< + // hook option + cds::intrusive::opt::hook< cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::HP >, tag_key1_idx > > + // item comparator option + ,cds::opt::compare< Key1Cmp > + >::type + > Key1_bucket; + + // Declare bucket type for Michael's hash set indexed by key2_ field and maintained by gc::PTB + typedef cds::intrusive::MichaelList< cds::gc::PTB, Foo, + typename cds::intrusive::michael_list::make_traits< + // hook option + cds::intrusive::opt::hook< cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::PTB >, tag_key2_idx > > + // item comparator option + ,cds::opt::less< Key2Less > + >::type + > Key2_bucket; + + // Declare hash functor + struct Key1Hash { + size_t operator()( const Foo& f ) const { return cds::opt::v::hash()( f.key1_ ) ; } + size_t operator()( const std::string& s ) const { return cds::opt::v::hash()( s ) ; } + }; + inline size_t Key2Hash( const Foo& f ) { return (size_t) f.key2_ ; } + + // Michael's set indexed by key1_ field + typedef cds::intrusive::MichaelHashSet< + cds::gc::HP + ,Key1_bucket + ,typename cds::intrusive::michael_set::make_traits< + cds::opt::hash< Key1Hash > + >::type + > key1_set; + + // Michael's set indexed by key2_ field + typedef cds::intrusive::MichaelHashSet< + cds::gc::PTB + ,Key2_bucket + ,typename cds::intrusive::michael_set::make_traits< + cds::opt::hash< Key2Hash > + >::type + > key2_set; + \endcode + */ + template < + class GC, + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_set::type_traits +#else + class Traits +#endif + > + class MichaelHashSet + { + public: + typedef OrderedList ordered_list ; ///< type of ordered list used as a bucket implementation + typedef ordered_list bucket_type ; ///< bucket type + typedef Traits options ; ///< Traits template parameters + + typedef typename ordered_list::value_type value_type ; ///< type of value stored in the list + typedef GC gc ; ///< Garbage collector + typedef typename ordered_list::key_comparator key_comparator ; ///< key comparison functor + typedef typename ordered_list::disposer disposer ; ///< Node disposer functor + + /// Hash functor for \ref value_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename options::hash >::type hash; + typedef typename options::item_counter item_counter ; ///< Item counter type + + typedef typename ordered_list::guarded_ptr guarded_ptr; ///< Guarded pointer + + /// Bucket table allocator + typedef cds::details::Allocator< bucket_type, typename options::allocator > bucket_table_allocator; + + protected: + item_counter m_ItemCounter ; ///< Item counter + hash m_HashFunctor ; ///< Hash functor + + bucket_type * m_Buckets ; ///< bucket table + + private: + //@cond + const size_t m_nHashBitmask; + //@endcond + + protected: + /// Calculates hash value of \p key + template + size_t hash_value( const Q& key ) const + { + return m_HashFunctor( key ) & m_nHashBitmask; + } + + /// Returns the bucket (ordered list) for \p key 
+ template + bucket_type& bucket( const Q& key ) + { + return m_Buckets[ hash_value( key ) ]; + } + + public: + /// Forward iterator + /** + The forward iterator for Michael's set is based on \p OrderedList forward iterator and has some features: + - it has no post-increment operator + - it iterates items in unordered fashion + - The iterator cannot be moved across thread boundary since it may contain GC's guard that is thread-private GC data. + - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the set. + + Therefore, the use of iterators in concurrent environment is not good idea. Use the iterator for the concurrent container + for debug purpose only. + */ + typedef michael_set::details::iterator< bucket_type, false > iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef michael_set::details::iterator< bucket_type, true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. + For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() ); + } + + /// Returns a forward const iterator addressing the first element in a set + //@{ + const_iterator begin() const + { + return get_const_begin(); + } + const_iterator cbegin() + { + return get_const_begin(); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a set + //@{ + const_iterator end() const + { + return get_const_end(); + } + const_iterator cend() + { + return get_const_end(); + } + //@} + + private: + //@{ + const_iterator get_const_begin() const + { + return const_iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count() ); + } + const_iterator get_const_end() const + { + return const_iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() ); + } + //@} + + public: + /// Initializes hash set + /** + The Michael's hash set is an unbounded container, but its hash table is non-expandable. + At construction time you should pass estimated maximum item count and a load factor. + The load factor is average size of one bucket - a small number between 1 and 10. + The bucket is an ordered single-linked list, searching in the bucket has linear complexity O(nLoadFactor). + The constructor defines hash table size as rounding nMaxItemCount / nLoadFactor up to nearest power of two. + */ + MichaelHashSet( + size_t nMaxItemCount, ///< estimation of max item count in the hash set + size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket. Small integer up to 10, default is 1. 
+ ) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) + { + // GC and OrderedList::gc must be the same + static_assert(( std::is_same::value ), "GC and OrderedList::gc must be the same"); + + // atomicity::empty_item_counter is not allowed as a item counter + static_assert(( !std::is_same::value ), "atomicity::empty_item_counter is not allowed as a item counter"); + + m_Buckets = bucket_table_allocator().NewArray( bucket_count() ); + } + + /// Clears hash set object and destroys it + ~MichaelHashSet() + { + clear(); + bucket_table_allocator().Delete( m_Buckets, bucket_count() ); + } + + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + bool bRet = bucket( val ).insert( val ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success and can be passed by reference + using boost::ref + */ + template + bool insert( value_type& val, Func f ) + { + bool bRet = bucket( val ).insert( val, f ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Ensures that the \p val exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the set, then \p val is inserted into the set. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refers to the same thing. + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You may pass \p func argument by reference using boost::ref or cds::ref. + + Returns std::pair where \p first is \p true if operation is successfull, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the set. + */ + template + std::pair ensure( value_type& val, Func func ) + { + std::pair bRet = bucket( val ).ensure( val, func ); + if ( bRet.first && bRet.second ) + ++m_ItemCounter; + return bRet; + } + + /// Unlinks the item \p val from the set + /** + The function searches the item \p val in the set and unlink it from the set + if it is found and is equal to \p val. + + The function returns \p true if success and \p false otherwise. 
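+
+        A minimal usage sketch (an illustration only; it assumes \p Foo and \p Foo_set
+        from the "How to use" example in the class description):
+        \code
+        Foo_set theSet( 100, 1 );
+        Foo * pItem = new Foo;
+        pItem->key_ = "sample";
+        if ( theSet.insert( *pItem )) {
+            // ... later: remove exactly this object, not just an item with an equal key
+            if ( theSet.unlink( *pItem )) {
+                // the object has been unlinked; do not delete it directly here -
+                // other threads may still access it, reclamation is the job of
+                // the disposer / application code
+            }
+        }
+        \endcode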
+ */ + bool unlink( value_type& val ) + { + bool bRet = bucket( val ).unlink( val ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_MichaelHashSet_hp_erase + The function searches an item with key equal to \p val in the set, + unlinks it from the set, and returns \p true. + If the item with key equal to \p val is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool erase( Q const& val ) + { + if ( bucket( val ).erase( val )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_hp_erase "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& val, Less pred ) + { + if ( bucket( val ).erase_with( val, pred )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_MichaelHashSet_hp_erase_func + The function searches an item with key equal to \p val in the set, + call \p f functor with item found, and unlinks it from the set. + The \ref disposer specified in \p OrderedList class template parameter is called + by garbage collector \p GC asynchronously. + + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + The functor may be passed by reference with boost:ref + + If the item with key equal to \p val is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool erase( const Q& val, Func f ) + { + if ( bucket( val ).erase( val, f )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_hp_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( const Q& val, Less pred, Func f ) + { + if ( bucket( val ).erase_with( val, pred, f )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Extracts the item with specified \p key + /** \anchor cds_intrusive_MichaelHashSet_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the set, and returns it in \p dest parameter. + If the item with key equal to \p key is not found the function returns \p false. + + Note the compare functor should accept a parameter of type \p Q that may be not the same as \p value_type. + + The \ref disposer specified in \p OrderedList class' template parameter is called automatically + by garbage collector \p GC when returned \ref guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::intrusive::MichaelHashSet< your_template_args > michael_set; + michael_set theSet; + // ... + { + michael_set::guarded_ptr gp; + theSet.extract( gp, 5 ); + // Deal with gp + // ... 
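+            // (illustrative note) while gp is alive the extracted item remains protected
+            // by the HP guard, so it may be accessed here via *gp or gp->,
+            // e.g. process( *gp );  // process() is a hypothetical user function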
+ + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + bool extract( guarded_ptr& dest, Q const& key ) + { + if ( bucket( key ).extract( dest, key )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Extracts the item using compare functor \p pred + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_hp_extract "extract(guarded_ptr&, Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool extract_with( guarded_ptr& dest, Q const& key, Less pred ) + { + if ( bucket( key ).extract_with( dest, key, pred )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Finds the key \p val + /** \anchor cds_intrusive_MichaelHashSet_hp_find_func + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return bucket( val ).find( val, f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_hp_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Less pred, Func f ) + { + return bucket( val ).find_with( val, pred, f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_MichaelHashSet_hp_find_cfunc + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. 
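+
+        A minimal sketch of a \p Func functor (an illustration only; it assumes \p Foo and
+        \p Foo_set from the "How to use" example and passes the functor by reference with cds::ref):
+        \code
+        struct copy_value {
+            unsigned nValue;
+            copy_value() : nValue(0) {}
+
+            void operator()( Foo& item, std::string const& /*key*/ )
+            {
+                nValue = item.val_;    // copy a non-key field of the item found
+            }
+        };
+        // ...
+        copy_value f;
+        if ( theSet.find( std::string("a key"), cds::ref(f) )) {
+            // f.nValue holds the value field of the item found
+        }
+        \endcode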
+ + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return bucket( val ).find( val, f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_hp_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred, Func f ) + { + return bucket( val ).find_with( val, pred, f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_MichaelHashSet_hp_find_val + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool find( Q const & val ) + { + return bucket( val ).find( val ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_hp_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const & val, Less pred ) + { + return bucket( val ).find_with( val, pred ); + } + + /// Finds the key \p val and return the item found + /** \anchor cds_intrusive_MichaelHashSet_hp_get + The function searches the item with key equal to \p val + and assigns the item found to guarded pointer \p ptr. + The function returns \p true if \p val is found, and \p false otherwise. + If \p val is not found the \p ptr parameter is not changed. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::intrusive::MichaeHashSet< your_template_params > michael_set; + michael_set theSet; + // ... + { + michael_set::guarded_ptr gp; + if ( theSet.get( gp, 5 )) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for \p OrderedList template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool get( guarded_ptr& ptr, Q const& val ) + { + return bucket( val ).get( ptr, val ); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_hp_get "get( guarded_ptr& ptr, Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool get_with( guarded_ptr& ptr, Q const& val, Less pred ) + { + return bucket( val ).get_with( ptr, val, pred ); + } + + /// Clears the set (non-atomic) + /** + The function unlink all items from the set. + The function is not atomic. It cleans up each bucket and then resets the item counter to zero. 
+ If there are a thread that performs insertion while \p clear is working the result is undefined in general case: + empty() may return \p true but the set may contain item(s). + Therefore, \p clear may be used only for debugging purposes. + + For each item the \p disposer is called after unlinking. + */ + void clear() + { + for ( size_t i = 0; i < bucket_count(); ++i ) + m_Buckets[i].clear(); + m_ItemCounter.reset(); + } + + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, the correct item counting feature is an important part of Michael's set implementation. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + size_t size() const + { + return m_ItemCounter; + } + + /// Returns the size of hash table + /** + Since MichaelHashSet cannot dynamically extend the hash table size, + the value returned is an constant depending on object initialization parameters; + see MichaelHashSet::MichaelHashSet for explanation. + */ + size_t bucket_count() const + { + return m_nHashBitmask + 1; + } + }; + +}} // namespace cds::intrusive + +#endif // ifndef __CDS_INTRUSIVE_MICHAEL_SET_H diff --git a/cds/intrusive/michael_set_base.h b/cds/intrusive/michael_set_base.h new file mode 100644 index 00000000..61f9e78a --- /dev/null +++ b/cds/intrusive/michael_set_base.h @@ -0,0 +1,207 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_MICHAEL_SET_BASE_H +#define __CDS_INTRUSIVE_MICHAEL_SET_BASE_H + +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// MichaelHashSet related definitions + /** @ingroup cds_intrusive_helper + */ + namespace michael_set { + + /// Type traits for MichaelHashSet class + struct type_traits { + /// Hash function + /** + Hash function converts the key fields of struct \p T stored in the hash-set + into value of type \p size_t called hash value that is an index of hash table. + + This is mandatory type and has no predefined one. + */ + typedef opt::none hash; + + /// Item counter + /** + The item counting is an important part of MichaelHashSet algorithm: + the empty() member function depends on correct item counting. + Therefore, atomicity::empty_item_counter is not allowed as a type of the option. + + Default is atomicity::item_counter. + */ + typedef atomicity::item_counter item_counter; + + /// Bucket table allocator + /** + Allocator for bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR + The allocator uses only in ctor (for allocating bucket table) + and in dtor (for destroying bucket table) + */ + typedef CDS_DEFAULT_ALLOCATOR allocator; + }; + + /// Metafunction converting option list to traits struct + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + + Available \p Options: + - opt::hash - mandatory option, specifies hash functor. + - opt::item_counter - optional, specifies item counting policy. See type_traits::item_counter + for default type. + - opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR. + + See \ref MichaelHashSet, \ref type_traits. 
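+
+        A minimal usage sketch (an illustration only; \p foo_hash is an assumed user-defined hash functor):
+        \code
+        typedef cds::intrusive::michael_set::make_traits<
+            cds::opt::hash< foo_hash >
+            ,cds::opt::item_counter< cds::atomicity::item_counter >
+        >::type foo_set_traits;
+        \endcode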
+ */ + template + struct make_traits { + typedef typename cds::opt::make_options< type_traits, CDS_OPTIONS3>::type type ; ///< Result of metafunction + }; + + //@cond + namespace details { + static inline size_t init_hash_bitmask( size_t nMaxItemCount, size_t nLoadFactor ) + { + if ( nLoadFactor == 0 ) + nLoadFactor = 1; + if ( nMaxItemCount == 0 ) + nMaxItemCount = 4; + const size_t nBucketCount = (size_t)( nMaxItemCount / nLoadFactor ); + const size_t nLog2 = cds::bitop::MSB( nBucketCount ); + + return (( size_t( 1 << nLog2 ) < nBucketCount ? size_t( 1 << (nLog2 + 1) ) : size_t( 1 << nLog2 ))) - 1; + } + + template + struct list_iterator_selector; + + template + struct list_iterator_selector< OrderedList, false> + { + typedef OrderedList * bucket_ptr; + typedef typename OrderedList::iterator type; + }; + + template + struct list_iterator_selector< OrderedList, true> + { + typedef OrderedList const * bucket_ptr; + typedef typename OrderedList::const_iterator type; + }; + + template + class iterator + { + protected: + typedef OrderedList bucket_type; + typedef typename list_iterator_selector< bucket_type, IsConst>::bucket_ptr bucket_ptr; + typedef typename list_iterator_selector< bucket_type, IsConst>::type list_iterator; + + bucket_ptr m_pCurBucket; + list_iterator m_itList; + bucket_ptr m_pEndBucket; + + void next() + { + if ( m_pCurBucket < m_pEndBucket ) { + if ( ++m_itList != m_pCurBucket->end() ) + return; + while ( ++m_pCurBucket < m_pEndBucket ) { + m_itList = m_pCurBucket->begin(); + if ( m_itList != m_pCurBucket->end() ) + return; + } + } + m_pCurBucket = m_pEndBucket - 1; + m_itList = m_pCurBucket->end(); + } + + public: + typedef typename list_iterator::value_ptr value_ptr; + typedef typename list_iterator::value_ref value_ref; + + public: + iterator() + : m_pCurBucket( null_ptr() ) + , m_itList() + , m_pEndBucket( null_ptr() ) + {} + + iterator( list_iterator const& it, bucket_ptr pFirst, bucket_ptr pLast ) + : m_pCurBucket( pFirst ) + , m_itList( it ) + , m_pEndBucket( pLast ) + { + if ( it == pFirst->end() ) + next(); + } + + iterator( iterator const& src ) + : m_pCurBucket( src.m_pCurBucket ) + , m_itList( src.m_itList ) + , m_pEndBucket( src.m_pEndBucket ) + {} + + value_ptr operator ->() const + { + assert( m_pCurBucket != null_ptr() ); + return m_itList.operator ->(); + } + + value_ref operator *() const + { + assert( m_pCurBucket != null_ptr() ); + return m_itList.operator *(); + } + + /// Pre-increment + iterator& operator ++() + { + next(); + return *this; + } + + iterator& operator = (const iterator& src) + { + m_pCurBucket = src.m_pCurBucket; + m_pEndBucket = src.m_pEndBucket; + m_itList = src.m_itList; + return *this; + } + + bucket_ptr bucket() const + { + return m_pCurBucket != m_pEndBucket ? 
m_pCurBucket : null_ptr(); + } + + template + bool operator ==(iterator const& i ) const + { + return m_pCurBucket == i.m_pCurBucket && m_itList == i.m_itList; + } + template + bool operator !=(iterator const& i ) const + { + return !( *this == i ); + } + + }; + } + //@endcond + } + + //@cond + // Forward declarations + template + class MichaelHashSet; + //@endcond + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_MICHAEL_SET_BASE_H diff --git a/cds/intrusive/michael_set_nogc.h b/cds/intrusive/michael_set_nogc.h new file mode 100644 index 00000000..51efe380 --- /dev/null +++ b/cds/intrusive/michael_set_nogc.h @@ -0,0 +1,386 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_MICHAEL_SET_NOGC_H +#define __CDS_INTRUSIVE_MICHAEL_SET_NOGC_H + +#include +#include +#include + +namespace cds { namespace intrusive { + + /// Michael's hash set (template specialization for gc::nogc) + /** @ingroup cds_intrusive_map + \anchor cds_intrusive_MichaelHashSet_nogc + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of list item. + + See \ref cds_intrusive_MichaelHashSet_hp "MichaelHashSet" for description of template parameters. + The template parameter \p OrderedList should be any gc::nogc-derived ordered list, for example, + \ref cds_intrusive_MichaelList_nogc "persistent MichaelList". + + The interface of the specialization is a slightly different. + */ + template < + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_set::type_traits +#else + class Traits +#endif + > + class MichaelHashSet< gc::nogc, OrderedList, Traits > + { + public: + typedef OrderedList bucket_type ; ///< type of ordered list used as a bucket implementation + typedef Traits options ; ///< Traits template parameters + + typedef typename bucket_type::value_type value_type ; ///< type of value stored in the list + typedef gc::nogc gc ; ///< Garbage collector + typedef typename bucket_type::key_comparator key_comparator ; ///< key comparison functor + typedef typename bucket_type::disposer disposer ; ///< Node disposer functor + + /// Hash functor for \ref value_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename options::hash >::type hash; + typedef typename options::item_counter item_counter ; ///< Item counter type + + /// Bucket table allocator + typedef cds::details::Allocator< bucket_type, typename options::allocator > bucket_table_allocator; + + protected: + item_counter m_ItemCounter ; ///< Item counter + hash m_HashFunctor ; ///< Hash functor + + bucket_type * m_Buckets ; ///< bucket table + + private: + //@cond + const size_t m_nHashBitmask; + //@endcond + + protected: + /// Calculates hash value of \p key + template + size_t hash_value( Q const & key ) const + { + return m_HashFunctor( key ) & m_nHashBitmask; + } + + /// Returns the bucket (ordered list) for \p key + template + bucket_type& bucket( Q const & key ) + { + return m_Buckets[ hash_value( key ) ]; + } + + public: + /// Forward iterator + /** + The forward iterator for Michael's set is based on \p OrderedList forward iterator and has some features: + - it has no post-increment operator + - it iterates items in unordered fashion + */ + typedef michael_set::details::iterator< bucket_type, false > iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef michael_set::details::iterator< bucket_type, true > const_iterator; + + 
/// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. + For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() ); + } + + /// Returns a forward const iterator addressing the first element in a set + //@{ + const_iterator begin() const + { + return get_const_begin(); + } + const_iterator cbegin() + { + return get_const_begin(); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a set + //@{ + const_iterator end() const + { + return get_const_end(); + } + const_iterator cend() + { + return get_const_end(); + } + //@} + + private: + //@cond + const_iterator get_const_begin() const + { + return const_iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count() ); + } + const_iterator get_const_end() const + { + return const_iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() ); + } + //@endcond + + public: + /// Initializes hash set + /** + See \ref cds_intrusive_MichaelHashSet_hp "MichaelHashSet" ctor for explanation + */ + MichaelHashSet( + size_t nMaxItemCount, ///< estimation of max item count in the hash set + size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket + ) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) + { + // GC and OrderedList::gc must be the same + static_assert(( std::is_same::value ), "GC and OrderedList::gc must be the same"); + + // atomicity::empty_item_counter is not allowed as a item counter + static_assert(( !std::is_same::value ), "atomicity::empty_item_counter is not allowed as a item counter"); + + m_Buckets = bucket_table_allocator().NewArray( bucket_count() ); + } + + /// Clears hash set object and destroys it + ~MichaelHashSet() + { + clear(); + bucket_table_allocator().Delete( m_Buckets, bucket_count() ); + } + + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + bool bRet = bucket( val ).insert( val ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Ensures that the \p item exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the set, then \p val is inserted into the set. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refers to the same thing. 
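+
+            For illustration, such a functor might look like the following sketch
+            (the \p nData member is hypothetical and stands for any non-key field of your item type):
+            \code
+            struct ensure_functor {
+                void operator()( bool bNew, value_type& item, value_type& val )
+                {
+                    if ( !bNew )
+                        item.nData = val.nData;     // update non-key data of the item already in the set
+                }
+            };
+            \endcode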
+ + The functor can change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You can pass \p func argument by value or by reference using boost::ref or cds::ref. + + Returns std::pair where \p first is \p true if operation is successfull, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the set. + */ + template + std::pair ensure( value_type& val, Func func ) + { + std::pair bRet = bucket( val ).ensure( val, func ); + if ( bRet.first && bRet.second ) + ++m_ItemCounter; + return bRet; + } + + /// Finds the key \p val + /** \anchor cds_intrusive_MichaelHashSet_nogc_find_val + The function searches the item with key equal to \p val + and returns pointer to item found, otherwise \p NULL. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + value_type * find( Q const& val ) + { + return bucket( val ).find( val ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_nogc_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + value_type * find_with( Q const& val, Less pred ) + { + return bucket( val ).find_with( val, pred ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_MichaelHashSet_nogc_find_func + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return bucket( val ).find( val, f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_nogc_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find( Q& val, Less pred, Func f ) + { + return bucket( val ).find_with( val, pred, f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_MichaelHashSet_nogc_find_cfunc + The function searches the item with key equal to \p val and calls the functor \p f for item found. 
+ The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return bucket( val ).find( val, f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_nogc_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred, Func f ) + { + return bucket( val ).find_with( val, pred, f ); + } + + /// Clears the set (non-atomic) + /** + The function unlink all items from the set. + The function is not atomic. It cleans up each bucket and then resets the item counter to zero. + If there are a thread that performs insertion while \p clear is working the result is undefined in general case: + empty() may return \p true but the set may contain item(s). + Therefore, \p clear may be used only for debugging purposes. + + For each item the \p disposer is called after unlinking. + */ + void clear() + { + for ( size_t i = 0; i < bucket_count(); ++i ) + m_Buckets[i].clear(); + m_ItemCounter.reset(); + } + + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, the correct item counting feature is an important part of Michael's set implementation. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + size_t size() const + { + return m_ItemCounter; + } + + /// Returns the size of hash table + /** + Since MichaelHashSet cannot dynamically extend the hash table size, + the value returned is an constant depending on object initialization parameters; + see MichaelHashSet::MichaelHashSet for explanation. + */ + size_t bucket_count() const + { + return m_nHashBitmask + 1; + } + + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_MICHAEL_SET_NOGC_H + diff --git a/cds/intrusive/michael_set_rcu.h b/cds/intrusive/michael_set_rcu.h new file mode 100644 index 00000000..8e130023 --- /dev/null +++ b/cds/intrusive/michael_set_rcu.h @@ -0,0 +1,681 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_MICHAEL_SET_RCU_H +#define __CDS_INTRUSIVE_MICHAEL_SET_RCU_H + +#include +#include + +namespace cds { namespace intrusive { + + /// Michael's hash set, \ref cds_urcu_desc "RCU" specialization + /** @ingroup cds_intrusive_map + \anchor cds_intrusive_MichaelHashSet_rcu + + Source: + - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" + + Michael's hash table algorithm is based on lock-free ordered list and it is very simple. + The main structure is an array \p T of size \p M. 
Each element in \p T is basically a pointer + to a hash bucket, implemented as a singly linked list. The array of buckets cannot be dynamically expanded. + However, each bucket may contain unbounded number of items. + + Template parameters are: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p OrderedList - ordered list implementation used as bucket for hash set, for example, MichaelList, LazyList. + The intrusive ordered list implementation specifies the type \p T stored in the hash-set, the reclamation + schema \p GC used by hash-set, the comparison functor for the type \p T and other features specific for + the ordered list. + - \p Traits - type traits. See michael_set::type_traits for explanation. + Instead of defining \p Traits struct you can use option-based syntax with michael_set::make_traits metafunction. + + \par Usage + Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" you should include: + \code + #include + #include + #include + + struct Foo { ... }; + // Hash functor for struct Foo + struct foo_hash { + size_t operator()( Foo const& foo ) const { return ... } + }; + + // Now, you can declare Michael's list for type Foo and default traits: + typedef cds::intrusive::MichaelList >, Foo > rcu_michael_list; + + // Declare Michael's set with MichaelList as bucket type + typedef cds::intrusive::MichaelSet< + cds::urcu::gc< general_buffered<> >, + rcu_michael_list, + cds::intrusive::michael_set::make_traits< + cds::opt::::hash< foo_hash > + >::type + > rcu_michael_set; + + // Declares hash set for 1000000 items with load factor 2 + rcu_michael_set theSet( 1000000, 2 ); + + // Now you can use theSet object in many threads without any synchronization. 
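+
+        // A read-side sketch (how the key is filled in is illustrative only):
+        // the pointer returned by get() is valid only while the RCU lock is held.
+        {
+            rcu_michael_set::rcu_lock lock;     // lock RCU before searching
+            Foo key;
+            // ... fill the key fields of key ...
+            Foo * pItem = theSet.get( key );
+            if ( pItem ) {
+                // use pItem here, while RCU is locked
+            }
+        }   // after RCU is unlocked, pItem may be retired by the disposer at any time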
+ \endcode + */ + template < + class RCU, + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_set::type_traits +#else + class Traits +#endif + > + class MichaelHashSet< cds::urcu::gc< RCU >, OrderedList, Traits > + { + public: + typedef OrderedList bucket_type ; ///< type of ordered list used as a bucket implementation + typedef Traits options ; ///< Traits template parameters + + typedef typename bucket_type::value_type value_type ; ///< type of value stored in the list + typedef cds::urcu::gc< RCU > gc ; ///< RCU schema + typedef typename bucket_type::key_comparator key_comparator ; ///< key comparison functor + typedef typename bucket_type::disposer disposer ; ///< Node disposer functor + + /// Hash functor for \ref value_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename options::hash >::type hash; + typedef typename options::item_counter item_counter ; ///< Item counter type + + /// Bucket table allocator + typedef cds::details::Allocator< bucket_type, typename options::allocator > bucket_table_allocator; + + typedef typename bucket_type::rcu_lock rcu_lock ; ///< RCU scoped lock + typedef typename bucket_type::exempt_ptr exempt_ptr ; ///< pointer to extracted node + /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that + static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = bucket_type::c_bExtractLockExternal; + + protected: + item_counter m_ItemCounter ; ///< Item counter + hash m_HashFunctor ; ///< Hash functor + + bucket_type * m_Buckets ; ///< bucket table + + private: + //@cond + const size_t m_nHashBitmask; + //@endcond + + protected: + //@cond + /// Calculates hash value of \p key + template + size_t hash_value( Q const& key ) const + { + return m_HashFunctor( key ) & m_nHashBitmask; + } + + /// Returns the bucket (ordered list) for \p key + template + bucket_type& bucket( Q const& key ) + { + return m_Buckets[ hash_value( key ) ]; + } + template + bucket_type const& bucket( Q const& key ) const + { + return m_Buckets[ hash_value( key ) ]; + } + //@endcond + + public: + /// Forward iterator + /** + The forward iterator for Michael's set is based on \p OrderedList forward iterator and has some features: + - it has no post-increment operator + - it iterates items in unordered fashion + */ + typedef michael_set::details::iterator< bucket_type, false > iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef michael_set::details::iterator< bucket_type, true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. 
+ For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() ); + } + + /// Returns a forward const iterator addressing the first element in a set + //@{ + const_iterator begin() const + { + return get_const_begin(); + } + const_iterator cbegin() + { + return get_const_begin(); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a set + //@{ + const_iterator end() const + { + return get_const_end(); + } + const_iterator cend() + { + return get_const_end(); + } + //@} + + private: + //@cond + const_iterator get_const_begin() const + { + return const_iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count() ); + } + const_iterator get_const_end() const + { + return const_iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() ); + } + //@endcond + + public: + /// Initialize hash set + /** + The Michael's hash set is an unbounded container, but its hash table is non-expandable. + At construction time you should pass estimated maximum item count and a load factor. + The load factor is average size of one bucket - a small number between 1 and 10. + The bucket is an ordered single-linked list, the complexity of searching in the bucket is linear O(nLoadFactor). + The constructor defines hash table size as rounding nMaxItemCount / nLoadFactor up to nearest power of two. + */ + MichaelHashSet( + size_t nMaxItemCount, ///< estimation of max item count in the hash set + size_t nLoadFactor ///< load factor: average size of the bucket + ) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) + { + // GC and OrderedList::gc must be the same + static_assert(( std::is_same::value ), "GC and OrderedList::gc must be the same"); + + // atomicity::empty_item_counter is not allowed as a item counter + static_assert(( !std::is_same::value ), "atomicity::empty_item_counter is not allowed as a item counter"); + + m_Buckets = bucket_table_allocator().NewArray( bucket_count() ); + } + + /// Clear hash set and destroy it + ~MichaelHashSet() + { + clear(); + bucket_table_allocator().Delete( m_Buckets, bucket_count() ); + } + + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + bool bRet = bucket( val ).insert( val ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. 
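+
+            For illustration, an initializing functor might look like this sketch
+            (the \p nData member is hypothetical and stands for any value field of your item type):
+            \code
+            struct init_functor {
+                void operator()( value_type& val )
+                {
+                    val.nData = 0;      // initialize non-key data of the inserted item
+                }
+            };
+            \endcode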
+ The user-defined functor is called only if the inserting is success and can be passed by reference + using boost::ref + */ + template + bool insert( value_type& val, Func f ) + { + bool bRet = bucket( val ).insert( val, f ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Ensures that the \p item exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the set, then \p val is inserted into the set. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refers to the same thing. + + The functor can change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You can pass \p func argument by value or by reference using boost::ref or cds::ref. + + Returns std::pair where \p first is \p true if operation is successfull, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the set. + */ + template + std::pair ensure( value_type& val, Func func ) + { + std::pair bRet = bucket( val ).ensure( val, func ); + if ( bRet.first && bRet.second ) + ++m_ItemCounter; + return bRet; + } + + /// Unlinks the item \p val from the set + /** + The function searches the item \p val in the set and unlink it from the set + if it is found and is equal to \p val. + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + bool bRet = bucket( val ).unlink( val ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_MichaelHashSet_rcu_erase + The function searches an item with key equal to \p val in the set, + unlinks it from the set, and returns \p true. + If the item with key equal to \p val is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that may be not the same as \p value_type. + */ + template + bool erase( Q const& val ) + { + if ( bucket( val ).erase( val )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_rcu_erase "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& val, Less pred ) + { + if ( bucket( val ).erase_with( val, pred )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_MichaelHashSet_rcu_erase_func + The function searches an item with key equal to \p val in the set, + call \p f functor with item found, and unlinks it from the set. + The \ref disposer specified in \p OrderedList class template parameter is called + by garbage collector \p GC asynchronously. 
+ + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + The functor may be passed by reference with boost:ref + + If the item with key equal to \p val is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool erase( const Q& val, Func f ) + { + if ( bucket( val ).erase( val, f )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_rcu_erase_func "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( const Q& val, Less pred, Func f ) + { + if ( bucket( val ).erase_with( val, pred, f )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Extracts an item from the set + /** \anchor cds_intrusive_MichaelHashSet_rcu_extract + The function searches an item with key equal to \p val in the set, + unlinks it from the set, places item pointer into \p dest argument, and returns \p true. + If the item with the key equal to \p val is not found the function return \p false. + + @note The function does NOT call RCU read-side lock or synchronization, + and does NOT dispose the item found. It just excludes the item from the set + and returns a pointer to item found. + You should lock RCU before calling of the function, and you should synchronize RCU + outside the RCU lock before reusing returned pointer. + + \code + #include + #include + #include + + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::intrusive::MichaelList< rcu, Foo > rcu_michael_list; + typedef cds::intrusive::MichaelHashSet< rcu, rcu_michael_list, foo_traits > rcu_michael_set; + + rcu_michael_set theSet; + // ... + + rcu_michael_set::exempt_ptr p; + { + // first, we should lock RCU + rcu_michael_set::rcu_lock lock; + + // Now, you can apply extract function + // Note that you must not delete the item found inside the RCU lock + if ( theSet.extract( p, 10 )) { + // do something with p + ... + } + } + + // We may safely release p here + // release() passes the pointer to RCU reclamation cycle: + // it invokes RCU retire_ptr function with the disposer you provided for rcu_michael_list. + p.release(); + \endcode + */ + template + bool extract( exempt_ptr& dest, Q const& val ) + { + if ( bucket( val ).extract( dest, val )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Extracts an item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_rcu_extract "extract(exempt_ptr&, Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool extract_with( exempt_ptr& dest, Q const& val, Less pred ) + { + if ( bucket( val ).extract_with( dest, val, pred )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Finds the key \p val + /** \anchor cds_intrusive_MichaelHashSet_rcu_find_val + The function searches the item with key equal to \p val + and returns \p true if \p val found or \p false otherwise. 
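+
+            A minimal usage sketch, where \p theSet is an object of this class (the key value is
+            illustrative; the hash functor and comparator are assumed to accept its type):
+            \code
+            if ( theSet.find( 10 )) {
+                // an item with key equal to 10 was in the set at the moment of the call
+            }
+            \endcode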
+ */ + template + bool find( Q const& val ) const + { + return bucket( val ).find( val ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_rcu_find_val "find(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred ) const + { + return bucket( val ).find_with( val, pred ); + } + + /// Find the key \p val + /** \anchor cds_intrusive_MichaelHashSet_rcu_find_func + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) const + { + return bucket( val ).find( val, f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_rcu_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Less pred, Func f ) const + { + return bucket( val ).find_with( val, pred, f ); + } + + /// Find the key \p val + /** \anchor cds_intrusive_MichaelHashSet_rcu_find_cfunc + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) const + { + return bucket( val ).find( val, f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_rcu_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing. 
+ \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred, Func f ) const + { + return bucket( val ).find_with( val, pred, f ); + } + + /// Finds the key \p val and return the item found + /** \anchor cds_intrusive_MichaelHashSet_rcu_get + The function searches the item with key equal to \p val and returns the pointer to item found. + If \p val is not found it returns \p NULL. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::intrusive::MichaelHashSet< your_template_parameters > hash_set; + hash_set theSet; + // ... + { + // Lock RCU + hash_set::rcu_lock lock; + + foo * pVal = theSet.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + // Unlock RCU by rcu_lock destructor + // pVal can be retired by disposer at any time after RCU has been unlocked + } + \endcode + */ + template + value_type * get( Q const& val ) const + { + return bucket( val ).get( val ); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + value_type * get_with( Q const& val, Less pred ) const + { + return bucket( val ).get_with( val, pred ); + } + + /// Clears the set (non-atomic) + /** + The function unlink all items from the set. + The function is not atomic. It cleans up each bucket and then resets the item counter to zero. + If there are a thread that performs insertion while \p clear is working the result is undefined in general case: + empty() may return \p true but the set may contain item(s). + Therefore, \p clear may be used only for debugging purposes. + + For each item the \p disposer is called after unlinking. + */ + void clear() + { + for ( size_t i = 0; i < bucket_count(); ++i ) + m_Buckets[i].clear(); + m_ItemCounter.reset(); + } + + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, the correct item counting feature is an important part of Michael's set implementation. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + size_t size() const + { + return m_ItemCounter; + } + + /// Returns the size of hash table + /** + Since %MichaelHashSet cannot dynamically extend the hash table size, + the value returned is an constant depending on object initialization parameters; + see \ref cds_intrusive_MichaelHashSet_hp "MichaelHashSet" for explanation. 
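+            For example, a set constructed with \p nMaxItemCount = 1000000 and \p nLoadFactor = 2
+            has 524288 buckets: 1000000 / 2 = 500000 rounded up to the nearest power of two.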
+ */ + size_t bucket_count() const + { + return m_nHashBitmask + 1; + } + + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_MICHAEL_SET_NOGC_H + diff --git a/cds/intrusive/moir_queue.h b/cds/intrusive/moir_queue.h new file mode 100644 index 00000000..29e08121 --- /dev/null +++ b/cds/intrusive/moir_queue.h @@ -0,0 +1,173 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_MOIR_QUEUE_H +#define __CDS_INTRUSIVE_MOIR_QUEUE_H + +#include + +namespace cds { namespace intrusive { + + /// A variation of Michael & Scott's lock-free queue (intrusive variant) + /** @ingroup cds_intrusive_queue + This is slightly optimized Michael & Scott's queue algorithm that overloads \ref dequeue function. + + Source: + \li [2000] Simon Doherty, Lindsay Groves, Victor Luchangco, Mark Moir + "Formal Verification of a practical lock-free queue algorithm" + + Cite from this work about difference from Michael & Scott algo: + "Our algorithm differs from Michael and Scott’s [MS98] in that we test whether \p Tail points to the header + node only after \p Head has been updated, so a dequeuing process reads \p Tail only once. The dequeue in + [MS98] performs this test before checking whether the next pointer in the dummy node is null, which + means that it reads \p Tail every time a dequeuing process loops. Under high load, when operations retry + frequently, our modification will reduce the number of accesses to global memory. This modification, however, + introduces the possibility of \p Head and \p Tail “crossing”." + + Type of node: \ref single_link::node + + Explanation of template arguments see intrusive::MSQueue. + + \par Examples + \code + #include + #include + + namespace ci = cds::inrtusive; + typedef cds::gc::HP hp_gc; + + // MoirQueue with Hazard Pointer garbage collector, base hook + item disposer: + struct Foo: public ci::single_link::node< hp_gc > + { + // Your data + ... + }; + + // Disposer for Foo struct just deletes the object passed in + struct fooDisposer { + void operator()( Foo * p ) + { + delete p; + } + }; + + typedef ci::MoirQueue< + hp_gc + ,Foo + ,ci::opt::hook< + ci::single_link::base_hook< ci::opt::gc > + > + ,ci::opt::disposer< fooDisposer > + > fooQueue; + + // MoirQueue with Hazard Pointer garbage collector, + // member hook + item disposer + item counter, + // without alignment of internal queue data: + struct Bar + { + // Your data + ... 
+ ci::single_link::node< hp_gc > hMember; + }; + + typedef ci::MoirQueue< + hp_gc + ,Foo + ,ci::opt::hook< + ci::single_link::member_hook< + offsetof(Bar, hMember) + ,ci::opt::gc + > + > + ,ci::opt::disposer< fooDisposer > + ,cds::opt::item_counter< cds::atomicity::item_counter > + ,cds::opt::alignment< cds::opt::no_special_alignment > + > barQueue; + + \endcode + */ + template + class MoirQueue: public MSQueue< GC, T, CDS_OPTIONS9 > + { + //@cond + typedef MSQueue< GC, T, CDS_OPTIONS9 > base_class; + typedef typename base_class::node_type node_type; + //@endcond + + public: + //@cond + typedef typename base_class::value_type value_type; + typedef typename base_class::back_off back_off; + typedef typename base_class::gc gc; + typedef typename base_class::node_traits node_traits; + typedef typename base_class::memory_model memory_model; + //@endcond + + /// Rebind template arguments + template + struct rebind { + typedef MoirQueue< GC2, T2, CDS_OTHER_OPTIONS9> other ; ///< Rebinding result + }; + + protected: + //@cond + typedef typename base_class::dequeue_result dequeue_result; + typedef typename base_class::node_to_value node_to_value; + + bool do_dequeue( dequeue_result& res ) + { + back_off bkoff; + + node_type * pNext; + node_type * h; + while ( true ) { + h = res.guards.protect( 0, base_class::m_pHead, node_to_value() ); + pNext = res.guards.protect( 1, h->m_pNext, node_to_value() ); + + if ( pNext == null_ptr() ) + return false ; // queue is empty + + if ( base_class::m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { + node_type * t = base_class::m_pTail.load(memory_model::memory_order_acquire); + if ( h == t ) + base_class::m_pTail.compare_exchange_strong( t, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + break; + } + + base_class::m_Stat.onDequeueRace(); + bkoff(); + } + + --base_class::m_ItemCounter; + base_class::m_Stat.onDequeue(); + + res.pHead = h; + res.pNext = pNext; + return true; + } + //@endcond + + public: + /// Dequeues a value from the queue + /** @anchor cds_intrusive_MoirQueue_dequeue + See warning about item disposing in \ref MSQueue::dequeue. 
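+
+            A minimal usage sketch with the \p fooQueue type declared in the example above:
+            \code
+            fooQueue q;
+            // ... enqueue some items ...
+            Foo * p = q.dequeue();
+            if ( p ) {
+                // use *p here, but do not delete it manually:
+                // the queue passes it to the disposer itself (see the warning referenced above)
+            }
+            \endcode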
+ */ + value_type * dequeue() + { + dequeue_result res; + if ( do_dequeue( res )) { + base_class::dispose_result( res ); + return node_traits::to_value_ptr( *res.pNext ); + } + return null_ptr(); + } + + /// Synonym for \ref cds_intrusive_MoirQueue_dequeue "dequeue" function + value_type * pop() + { + return dequeue(); + } + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_MOIR_QUEUE_H diff --git a/cds/intrusive/mspriority_queue.h b/cds/intrusive/mspriority_queue.h new file mode 100644 index 00000000..fa651cc7 --- /dev/null +++ b/cds/intrusive/mspriority_queue.h @@ -0,0 +1,515 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_MSPRIORITY_QUEUE_H +#define __CDS_INTRUSIVE_MSPRIORITY_QUEUE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// MSPriorityQueue related definitions + /** @ingroup cds_intrusive_helper + */ + namespace mspriority_queue { + + /// MSPriorityQueue statistics + template + struct stat { + typedef Counter event_counter ; ///< Event counter type + + event_counter m_nPushCount ; ///< Count of success push operation + event_counter m_nPopCount ; ///< Count of success pop operation + event_counter m_nPushFailCount ; ///< Count of failed ("the queue is full") push operation + event_counter m_nPopFailCount ; ///< Count of failed ("the queue is empty") pop operation + event_counter m_nPushHeapifySwapCount ; ///< Count of item swapping when heapifying in push + event_counter m_nPopHeapifySwapCount ; ///< Count of item swapping when heapifying in pop + + //@cond + void onPushSuccess() { ++m_nPushCount ;} + void onPopSuccess() { ++m_nPopCount ;} + void onPushFailed() { ++m_nPushFailCount ;} + void onPopFailed() { ++m_nPopFailCount ;} + void onPushHeapifySwap() { ++m_nPushHeapifySwapCount ;} + void onPopHeapifySwap() { ++m_nPopHeapifySwapCount ;} + //@endcond + }; + + /// MSPriorityQueue empty statistics + struct empty_stat { + //@cond + void onPushSuccess() {} + void onPopSuccess() {} + void onPushFailed() {} + void onPopFailed() {} + void onPushHeapifySwap() {} + void onPopHeapifySwap() {} + //@endcond + }; + + /// Type traits for MSPriorityQueue + struct type_traits { + /// Storage type + /** + The storage type for the heap array. Default is cds::opt::v::dynamic_buffer. + + You may specify any type of buffer's value since at instantiation time + the \p buffer::rebind member metafunction is called to change type + of values stored in the buffer. + */ + typedef opt::v::dynamic_buffer buffer; + + /// Priority compare functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// specifies binary predicate used for priority comparing. + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// Type of mutual-exclusion lock + typedef lock::Spin lock_type; + + /// Back-off strategy + typedef backoff::yield back_off; + + /// Internal statistics + /** + Possible types: mspriority_queue::empty_stat (the default), mspriority_queue::stat + or any other with interface like \p %mspriority_queue::stat + */ + typedef empty_stat stat; + }; + + /// Metafunction converting option list to traits + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + + See \ref MSPriorityQueue, \ref type_traits, \ref cds::opt::make_options. 
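+
+            A usage sketch (the \p Foo value type and the \p foo_less predicate are illustrative only):
+            \code
+            typedef cds::intrusive::MSPriorityQueue< Foo,
+                cds::intrusive::mspriority_queue::make_traits<
+                    cds::opt::buffer< cds::opt::v::dynamic_buffer< char > >
+                    ,cds::opt::less< foo_less >
+                >::type
+            > foo_priority_queue;
+            \endcode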
+ */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS7 >::type + ,CDS_OPTIONS7 + >::type type; +# endif + }; + + } // namespace mspriority_queue + + /// Michael & Scott array-based lock-based concurrent priority queue heap + /** @ingroup cds_intrusive_priority_queue + Source: + - [1996] G.Hunt, M.Michael, S. Parthasarathy, M.Scott + "An efficient algorithm for concurrent priority queue heaps" + + \p %MSPriorityQueue augments the standard array-based heap data structure with + a mutual-exclusion lock on the heap's size and locks on each node in the heap. + Each node also has a tag that indicates whether + it is empty, valid, or in a transient state due to an update to the heap + by an inserting thread. + The algorithm allows concurrent insertions and deletions in opposite directions, + without risking deadlock and without the need for special server threads. + It also uses a "bit-reversal" technique to scatter accesses across the fringe + of the tree to reduce contention. + On large heaps the algorithm achieves significant performance improvements + over serialized single-lock algorithm, for various insertion/deletion + workloads. For small heaps it still performs well, but not as well as + single-lock algorithm. + + Template parameters: + - \p T - type to be stored in the list. The priority is a part of \p T type. + - \p Traits - type traits. See mspriority_queue::type_traits for explanation. + + It is possible to declare option-based queue with cds::container::mspriority_queue::make_traits + metafunction instead of \p Traits template argument. + Template argument list \p Options of \p %cds::container::mspriority_queue::make_traits metafunction are: + - opt::buffer - the buffer type for heap array. Possible type are: opt::v::static_buffer, opt::v::dynamic_buffer. + Default is \p %opt::v::dynamic_buffer. + You may specify any type of values for the buffer since at instantiation time + the \p buffer::rebind member metafunction is called to change the type of values stored in the buffer. + - opt::compare - priority compare functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for priority compare. Default is \p std::less. + - opt::lock_type - lock type. Default is cds::lock::Spin. + - opt::back_off - back-off strategy. Default is cds::backoff::yield + - opt::stat - internal statistics. Available types: mspriority_queue::stat, mspriority_queue::empty_stat (the default) + */ + template + class MSPriorityQueue: public cds::bounded_container + { + public: + typedef T value_type ; ///< Value type stored in the queue + typedef Traits traits ; ///< Traits template parameter + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< priority comparing functor based on opt::compare and opt::less option setter. 
+# else + typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; +# endif + + typedef typename traits::lock_type lock_type ; ///< heap's size lock type + typedef typename traits::back_off back_off ; ///< Back-off strategy + typedef typename traits::stat stat ; ///< internal statistics type + + protected: + //@cond + typedef cds::OS::ThreadId tag_type; + + enum tag_value { + Available = -1, + Empty = 0 + }; + //@endcond + + //@cond + /// Heap item type + struct node { + value_type * m_pVal ; ///< A value pointer + tag_type volatile m_nTag ; ///< A tag + mutable lock_type m_Lock ; ///< Node-level lock + + /// Creates empty node + node() + : m_pVal( null_ptr() ) + , m_nTag( tag_type(Empty) ) + {} + + /// Lock the node + void lock() + { + m_Lock.lock(); + } + + /// Unlock the node + void unlock() + { + m_Lock.unlock(); + } + }; + //@endcond + + protected: + //@cond +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_cleaner + { + void operator()( value_type const& ) const + {} + }; +# endif + //@endcond + + public: + typedef typename traits::buffer::template rebind::other buffer_type ; ///< Heap array buffer type + + //@cond + typedef cds::bitop::bit_reverse_counter<> item_counter_type; + typedef typename item_counter_type::counter_type counter_type; + //@endcond + + protected: + item_counter_type m_ItemCounter ; ///< Item counter + mutable lock_type m_Lock ; ///< Heap's size lock + buffer_type m_Heap ; ///< Heap array + stat m_Stat ; ///< internal statistics accumulator + + public: + /// Constructs empty priority queue + /** + For cds::opt::v::static_buffer the \p nCapacity parameter is ignored. + */ + MSPriorityQueue( size_t nCapacity ) + : m_Heap( nCapacity ) + {} + + /// Clears priority queue and destructs the object + ~MSPriorityQueue() + { + clear(); + } + + /// Inserts a item into priority queue + /** + If the priority queue is full, the function returns \p false, + no item has been added. + Otherwise, the function inserts the copy of \p val into the heap + and returns \p true. + + The function use copy constructor to create new heap item from \p val. + */ + bool push( value_type& val ) + { + tag_type const curId = cds::OS::getCurrentThreadId(); + + // Insert new item at bottom of the heap + m_Lock.lock(); + if ( m_ItemCounter.value() >= capacity() ) { + // the heap is full + m_Lock.unlock(); + m_Stat.onPushFailed(); + return false; + } + + counter_type i = m_ItemCounter.inc(); + assert( i < m_Heap.capacity() ); + + node& refNode = m_Heap[i]; + refNode.lock(); + m_Lock.unlock(); + refNode.m_pVal = &val; + refNode.m_nTag = curId; + refNode.unlock(); + + // Move item towards top of the heap while it has higher priority than parent + heapify_after_push( i, curId ); + + m_Stat.onPushSuccess(); + return true; + } + + /// Extracts item with high priority + /** + If the priority queue is empty, the function returns \p nullptr. + Otherwise, it returns the item extracted. + + The item returned may be disposed immediately. 
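+
+            A minimal usage sketch, where \p pq is an object of this class:
+            \code
+            value_type * p = pq.pop();
+            if ( p ) {
+                // p points to the extracted highest-priority item;
+                // the queue no longer references it
+            }
+            \endcode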
+ */ + value_type * pop() + { + m_Lock.lock(); + if ( m_ItemCounter.value() == 0 ) { + // the heap is empty + m_Lock.unlock(); + m_Stat.onPopFailed(); + return false; + } + counter_type nBottom = m_ItemCounter.reversed_value(); + m_ItemCounter.dec(); + // Since m_Heap[0] is not used, capacity() returns m_Heap.capacity() - 1 + // Consequently, "<=" is here + assert( nBottom <= capacity() ); + assert( nBottom > 0 ); + + node& refBottom = m_Heap[ nBottom ]; + refBottom.lock(); + m_Lock.unlock(); + refBottom.m_nTag = tag_type(Empty); + value_type * pVal = refBottom.m_pVal; + refBottom.m_pVal = null_ptr(); + refBottom.unlock(); + + node& refTop = m_Heap[ 1 ]; + refTop.lock(); + if ( refTop.m_nTag == tag_type(Empty) ) { + // nBottom == nTop + refTop.unlock(); + m_Stat.onPopSuccess(); + return pVal; + } + + std::swap( refTop.m_pVal, pVal ); + refTop.m_nTag = tag_type( Available ); + + assert( nBottom > 1 ); + + // refTop will be unlocked inside heapify_after_pop + heapify_after_pop( 1, &refTop ); + + m_Stat.onPopSuccess(); + return pVal; + } + + /// Clears the queue (not atomic) + /** + This function is no atomic, but thread-safe + */ + void clear() + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + clear_with( []( value_type const& src ) {} ); +# else + clear_with( empty_cleaner() ); +# endif + } + + /// Clears the queue (not atomic) + /** + This function is no atomic, but thread-safe. + + For each item removed the functor \p f is called. + \p Func interface is: + \code + struct clear_functor + { + void operator()( value_type& item ); + }; + \endcode + A lambda function or a function pointer can be used as \p f. + */ + template + void clear_with( Func f ) + { + while ( !empty() ) { + value_type * pVal = pop(); + if ( pVal ) + cds::unref(f)( *pVal ); + } + } + + /// Checks is the priority queue is empty + bool empty() const + { + return size() == 0; + } + + /// Checks if the priority queue is full + bool full() const + { + return size() == capacity(); + } + + /// Returns current size of priority queue + size_t size() const + { + m_Lock.lock(); + size_t nSize = (size_t) m_ItemCounter.value(); + m_Lock.unlock(); + return nSize; + } + + /// Return capacity of the priority queue + size_t capacity() const + { + // m_Heap[0] is not used + return m_Heap.capacity() - 1; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + protected: + //@cond + + void heapify_after_push( counter_type i, tag_type curId ) + { + key_comparator cmp; + back_off bkoff; + + // Move item towards top of the heap while it has higher priority than parent + while ( i > 1 ) { + bool bProgress = true; + counter_type nParent = i / 2; + node& refParent = m_Heap[nParent]; + refParent.lock(); + node& refItem = m_Heap[i]; + refItem.lock(); + + if ( refParent.m_nTag == tag_type(Available) && refItem.m_nTag == curId ) { + if ( cmp( *refItem.m_pVal, *refParent.m_pVal ) > 0 ) { + std::swap( refItem.m_nTag, refParent.m_nTag ); + std::swap( refItem.m_pVal, refParent.m_pVal ); + m_Stat.onPushHeapifySwap(); + i = nParent; + } + else { + refItem.m_nTag = tag_type(Available); + i = 0; + } + } + else if ( refParent.m_nTag == tag_type(Empty) ) + i = 0; + else if ( refItem.m_nTag != curId ) + i = nParent; + else + bProgress = false; + + refItem.unlock(); + refParent.unlock(); + + if ( !bProgress ) + bkoff(); + else + bkoff.reset(); + } + + if ( i == 1 ) { + node& refItem = m_Heap[i]; + refItem.lock(); + if ( refItem.m_nTag == curId ) + refItem.m_nTag = tag_type(Available); + refItem.unlock(); + } 
+ } + + void heapify_after_pop( counter_type nParent, node * pParent ) + { + key_comparator cmp; + + while ( nParent < m_Heap.capacity() / 2 ) { + counter_type nLeft = nParent * 2; + counter_type nRight = nLeft + 1; + node& refLeft = m_Heap[nLeft]; + node& refRight = m_Heap[nRight]; + refLeft.lock(); + refRight.lock(); + + counter_type nChild; + node * pChild; + if ( refLeft.m_nTag == tag_type(Empty) ) { + refRight.unlock(); + refLeft.unlock(); + break; + } + else if ( refRight.m_nTag == tag_type(Empty) || cmp( *refLeft.m_pVal, *refRight.m_pVal ) > 0 ) { + refRight.unlock(); + nChild = nLeft; + pChild = &refLeft; + } + else { + refLeft.unlock(); + nChild = nRight; + pChild = &refRight; + } + + // If child has higher priority that parent then swap + // Otherwise stop + if ( cmp( *pChild->m_pVal, *pParent->m_pVal ) > 0 ) { + std::swap( pParent->m_nTag, pChild->m_nTag ); + std::swap( pParent->m_pVal, pChild->m_pVal ); + pParent->unlock(); + m_Stat.onPopHeapifySwap(); + nParent = nChild; + pParent = pChild; + } + else { + pChild->unlock(); + break; + } + } + pParent->unlock(); + } + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_MSPRIORITY_QUEUE_H diff --git a/cds/intrusive/msqueue.h b/cds/intrusive/msqueue.h new file mode 100644 index 00000000..aa37ba5b --- /dev/null +++ b/cds/intrusive/msqueue.h @@ -0,0 +1,427 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_MSQUEUE_H +#define __CDS_INTRUSIVE_MSQUEUE_H + +#include +#include +#include + +#include + +namespace cds { namespace intrusive { + + /// Michael & Scott's lock-free queue (intrusive variant) + /** @ingroup cds_intrusive_queue + Implementation of well-known Michael & Scott's queue algorithm. + + \par Source: + [1998] Maged Michael, Michael Scott "Simple, fast, and practical non-blocking and blocking + concurrent queue algorithms" + + Template arguments: + - \p GC - garbage collector type: gc::HP, gc::HRC, gc::PTB + - \p T - type to be stored in the queue, should be convertible to \ref single_link::node + - \p Options - options + + Type of node: \ref single_link::node + + \p Options are: + - opt::hook - hook used. Possible values are: single_link::base_hook, single_link::member_hook, single_link::traits_hook. + If the option is not specified, single_link::base_hook<> is used. + For Gidenstam's gc::HRC, only single_link::base_hook is supported. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. + - opt::disposer - the functor used for dispose removed items. Default is opt::v::empty_disposer. This option is used + in \ref dequeue function. + - opt::link_checker - the type of node's link fields checking. Default is \ref opt::debug_check_link + Note: for gc::HRC garbage collector, link checking policy is always selected as \ref opt::always_check_link. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter (no item counting feature) + - opt::stat - the type to gather internal statistics. + Possible option value are: \ref queue_stat, \ref queue_dummy_stat, user-provided class that supports queue_stat interface. + Default is \ref queue_dummy_stat. + - opt::alignment - the alignment for internal queue data. Default is opt::cache_line_alignment + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). 
+ + Garbage collecting schema \p GC must be consistent with the single_link::node GC. + + \par About item disposing + The Michael & Scott's queue algo has a key feature: even if the queue is empty it contains one item that is "dummy" one from + the standpoint of the algo. See \ref dequeue function doc for explanation. + + \par Examples + \code + #include + #include + + namespace ci = cds::inrtusive; + typedef cds::gc::HP hp_gc; + + // MSQueue with Hazard Pointer garbage collector, base hook + item disposer: + struct Foo: public ci::single_link::node< hp_gc > + { + // Your data + ... + }; + + // Disposer for Foo struct just deletes the object passed in + struct fooDisposer { + void operator()( Foo * p ) + { + delete p; + } + }; + + typedef ci::MSQueue< hp_gc, + Foo + ,ci::opt::hook< + ci::single_link::base_hook< ci::opt::gc > + > + ,ci::opt::disposer< fooDisposer > + > fooQueue; + + // MSQueue with Hazard Pointer garbage collector, + // member hook + item disposer + item counter, + // without alignment of internal queue data: + struct Bar + { + // Your data + ... + ci::single_link::node< hp_gc > hMember; + }; + + typedef ci::MSQueue< hp_gc, + Foo + ,ci::opt::hook< + ci::single_link::member_hook< + offsetof(Bar, hMember) + ,ci::opt::gc + > + > + ,ci::opt::disposer< fooDisposer > + ,cds::opt::item_counter< cds::atomicity::item_counter > + ,cds::opt::alignment< cds::opt::no_special_alignment > + > barQueue; + \endcode + */ + template + class MSQueue + { + //@cond + struct default_options + { + typedef cds::backoff::empty back_off; + typedef single_link::base_hook<> hook; + typedef opt::v::empty_disposer disposer; + typedef atomicity::empty_item_counter item_counter; + typedef queue_dummy_stat stat; + typedef opt::v::relaxed_ordering memory_model; + static const opt::link_check_type link_checker = opt::debug_check_link; + enum { alignment = opt::cache_line_alignment }; + }; + //@endcond + + public: + //@cond + typedef typename opt::make_options< + typename cds::opt::find_type_traits< default_options, CDS_OPTIONS9 >::type + ,CDS_OPTIONS9 + >::type options; + //@endcond + + public: + typedef T value_type ; ///< type of value stored in the queue + typedef typename options::hook hook ; ///< hook type + typedef typename hook::node_type node_type ; ///< node type + typedef typename options::disposer disposer ; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + typedef typename single_link::get_link_checker< node_type, options::link_checker >::type link_checker ; ///< link checker + + typedef GC gc ; ///< Garbage collector + typedef typename options::back_off back_off ; ///< back-off strategy + typedef typename options::item_counter item_counter ; ///< Item counting policy used + typedef typename options::stat stat ; ///< Internal statistics policy used + typedef typename options::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + + /// Rebind template arguments + template + struct rebind { + typedef MSQueue< GC2, T2, CDS_OTHER_OPTIONS9> other ; ///< Rebinding result + }; + + protected: + //@cond + struct internal_disposer + { + void operator()( value_type * p ) + { + assert( p != null_ptr()); + + MSQueue::clear_links( node_traits::to_node_ptr(p) ); + disposer()( p ); + } + }; + + typedef intrusive::node_to_value node_to_value; + typedef typename opt::details::alignment_setter< typename node_type::atomic_node_ptr, options::alignment >::type aligned_node_ptr; + + typedef typename opt::details::alignment_setter< + cds::intrusive::details::dummy_node< gc, node_type>, + options::alignment + >::type dummy_node_type; + + aligned_node_ptr m_pHead ; ///< Queue's head pointer (cache-line aligned) + aligned_node_ptr m_pTail ; ///< Queue's tail pointer (cache-line aligned) + dummy_node_type m_Dummy ; ///< dummy node + item_counter m_ItemCounter ; ///< Item counter + stat m_Stat ; ///< Internal statistics + //@endcond + + //@cond + struct dequeue_result { + typename gc::template GuardArray<2> guards; + + node_type * pHead; + node_type * pNext; + }; + + bool do_dequeue( dequeue_result& res ) + { + node_type * pNext; + back_off bkoff; + + node_type * h; + while ( true ) { + h = res.guards.protect( 0, m_pHead, node_to_value() ); + pNext = h->m_pNext.load( memory_model::memory_order_relaxed ); + res.guards.assign( 1, node_to_value()( pNext )); + //pNext = res.guards.protect( 1, h->m_pNext, node_to_value() ); + if ( m_pHead.load(memory_model::memory_order_acquire) != h ) + continue; + + if ( pNext == null_ptr() ) + return false ; // empty queue + + node_type * t = m_pTail.load(memory_model::memory_order_acquire); + if ( h == t ) { + // It is needed to help enqueue + m_pTail.compare_exchange_strong( t, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + m_Stat.onBadTail(); + continue; + } + + if ( m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + break; + + m_Stat.onDequeueRace(); + bkoff(); + } + + --m_ItemCounter; + m_Stat.onDequeue(); + + res.pHead = h; + res.pNext = pNext; + return true; + } + + static void clear_links( node_type * pNode ) + { + pNode->m_pNext.store( null_ptr(), memory_model::memory_order_release ); + } + + void dispose_result( dequeue_result& res ) + { + dispose_node( res.pHead ); + } + + void dispose_node( node_type * p ) + { + if ( p != m_Dummy.get() ) { + gc::template retire( node_traits::to_value_ptr(p) ); + } + else { + // We cannot clear m_Dummy here since it leads to ABA. + // On the other hand, we cannot use deferred clear_links( &m_Dummy ) call via + // HP retiring cycle since m_Dummy is member of MSQueue and may be destroyed + // before HP retiring cycle invocation. + // So, we will never clear m_Dummy for gc::HP and gc::PTB + // However, gc::HRC nodes are managed by reference counting, so, we must + // call HP retire cycle. 
+ m_Dummy.retire(); + } + } + //@endcond + + public: + /// Initializes empty queue + MSQueue() + : m_pHead( null_ptr() ) + , m_pTail( null_ptr() ) + { + // GC and node_type::gc must be the same + static_assert(( std::is_same::value ), "GC and node_type::gc must be the same"); + + // For cds::gc::HRC, only base_hook is allowed + static_assert(( + std::conditional< + std::is_same::value, + std::is_same< typename hook::hook_type, opt::base_hook_tag >, + boost::true_type + >::type::value + ), "For cds::gc::HRC, only base_hook is allowed"); + + // Head/tail initialization should be made via store call + // since gc::HRC manages reference counting + m_pHead.store( m_Dummy.get(), memory_model::memory_order_relaxed ); + m_pTail.store( m_Dummy.get(), memory_model::memory_order_relaxed ); + } + + /// Destructor clears the queue + /** + Since the Michael & Scott queue contains at least one item even + if the queue is empty, the destructor may call item disposer. + */ + ~MSQueue() + { + clear(); + + node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed); + + assert( pHead != null_ptr() ); + assert( pHead == m_pTail.load(memory_model::memory_order_relaxed) ); + + m_pHead.store( null_ptr(), memory_model::memory_order_relaxed ); + m_pTail.store( null_ptr(), memory_model::memory_order_relaxed ); + + dispose_node( pHead ); + } + + /// Returns queue's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the queue + is empty. To check queue emptyness use \ref empty() method. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns reference to internal statistics + const stat& statistics() const + { + return m_Stat; + } + + /// Enqueues \p val value into the queue. + /** @anchor cds_intrusive_MSQueue_enqueue + The function always returns \p true. + */ + bool enqueue( value_type& val ) + { + node_type * pNew = node_traits::to_node_ptr( val ); + link_checker::is_empty( pNew ); + + typename gc::Guard guard; + back_off bkoff; + + node_type * t; + while ( true ) { + t = guard.protect( m_pTail, node_to_value() ); + + node_type * pNext = t->m_pNext.load(memory_model::memory_order_acquire); + if ( pNext != null_ptr() ) { + // Tail is misplaced, advance it + m_pTail.compare_exchange_weak( t, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + m_Stat.onBadTail(); + continue; + } + + node_type * tmp = null_ptr(); + if ( t->m_pNext.compare_exchange_strong( tmp, pNew, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + break; + + m_Stat.onEnqueueRace(); + bkoff(); + } + ++m_ItemCounter; + m_Stat.onEnqueue(); + + if ( !m_pTail.compare_exchange_strong( t, pNew, memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed )) + m_Stat.onAdvanceTailFailed(); + return true; + } + + /// Dequeues a value from the queue + /** @anchor cds_intrusive_MSQueue_dequeue + If the queue is empty the function returns \p NULL. + + \par Warning + The queue algorithm has following feature: when \p dequeue is called, + the item returning is still queue's top, and previous top is disposed: + + \code + before dequeuing Dequeue after dequeuing + +------------------+ +------------------+ + Top ->| Item 1 | -> Dispose Item 1 | Item 2 | <- Top + +------------------+ +------------------+ + | Item 2 | -> Return Item 2 | ... | + +------------------+ + | ... 
| + \endcode + + \p dequeue function returns Item 2, that becomes new top of queue, and calls + the disposer for Item 1, that was queue's top on function entry. + Thus, you cannot manually delete item returned because it is still included in + item sequence and it has valuable link field that must not be zeroed. + The item may be deleted only in disposer call. + */ + value_type * dequeue() + { + dequeue_result res; + + if ( do_dequeue( res )) { + dispose_result( res ); + + return node_traits::to_value_ptr( *res.pNext ); + } + return null_ptr(); + } + + /// Synonym for \ref cds_intrusive_MSQueue_enqueue "enqueue" function + bool push( value_type& val ) + { + return enqueue( val ); + } + + /// Synonym for \ref cds_intrusive_MSQueue_dequeue "dequeue" function + value_type * pop() + { + return dequeue(); + } + + /// Checks if the queue is empty + bool empty() const + { + typename gc::Guard guard; + return guard.protect( m_pHead, node_to_value() )->m_pNext.load(memory_model::memory_order_relaxed) == null_ptr(); + } + + /// Clear the queue + /** + The function repeatedly calls \ref dequeue until it returns \p NULL. + The disposer defined in template \p Options is called for each item + that can be safely disposed. + */ + void clear() + { + while ( dequeue() ); + } + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_MSQUEUE_H diff --git a/cds/intrusive/node_traits.h b/cds/intrusive/node_traits.h new file mode 100644 index 00000000..6dda1f37 --- /dev/null +++ b/cds/intrusive/node_traits.h @@ -0,0 +1,166 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_NODE_TRAITS_H +#define __CDS_INTRUSIVE_NODE_TRAITS_H + +#include + +namespace cds { namespace intrusive { + +#ifdef CDS_DOXYGEN_INVOKED + /// Container's node traits + /** @ingroup cds_intrusive_helper + This traits is intended for converting between type \p T of value stored in the intrusive container + and container's node type \p NodeType. + + There are separate specializations for each \p Hook type. + */ + template + struct node_traits + { + typedef T value_type ; ///< Value type + typedef NodeType node_type ; ///< Node type + + /// Convert value reference to node pointer + static node_type * to_node_ptr( value_type& v ); + + /// Convert value pointer to node pointer + static node_type * to_node_ptr( value_type * v ); + + /// Convert value reference to node pointer (const version) + static const node_type * to_node_ptr( value_type const& v ); + + /// Convert value pointer to node pointer (const version) + static const node_type * to_node_ptr( value_type const * v ); + + /// Convert node refernce to value pointer + static value_type * to_value_ptr( node_type& n ); + + /// Convert node pointer to value pointer + static value_type * to_value_ptr( node_type * n ); + + /// Convert node reference to value pointer (const version) + static const value_type * to_value_ptr( node_type const & n ); + + /// Convert node pointer to value pointer (const version) + static const value_type * to_value_ptr( node_type const * n ); + }; + +#else + template + struct node_traits; +#endif + + //@cond + template + struct node_traits + { + typedef T value_type; + typedef NodeType node_type; + + static node_type * to_node_ptr( value_type& v ) + { + return static_cast( &v ); + } + static node_type * to_node_ptr( value_type * v ) + { + return v ? 
static_cast( v ) : reinterpret_cast( NULL ); + } + static const node_type * to_node_ptr( const value_type& v ) + { + return static_cast( &v ); + } + static const node_type * to_node_ptr( const value_type * v ) + { + return v ? static_cast( v ) : reinterpret_cast( NULL ); + } + static value_type * to_value_ptr( node_type& n ) + { + return static_cast( &n ); + } + static value_type * to_value_ptr( node_type * n ) + { + return n ? static_cast( n ) : reinterpret_cast( NULL ); + } + static const value_type * to_value_ptr( const node_type& n ) + { + return static_cast( &n ); + } + static const value_type * to_value_ptr( const node_type * n ) + { + return n ? static_cast( n ) : reinterpret_cast( NULL ); + } + }; + + template + struct node_traits + { + typedef T value_type; + typedef NodeType node_type; + + static node_type * to_node_ptr( value_type& v ) + { + return reinterpret_cast( reinterpret_cast(&v) + Hook::c_nMemberOffset ); + } + static node_type * to_node_ptr( value_type * v ) + { + return v ? to_node_ptr(*v) : reinterpret_cast( NULL ); + } + static const node_type * to_node_ptr( const value_type& v ) + { + return reinterpret_cast( reinterpret_cast(&v) + Hook::c_nMemberOffset ); + } + static const node_type * to_node_ptr( const value_type * v ) + { + return v ? to_node_ptr(*v) : reinterpret_cast( NULL ); + } + static value_type * to_value_ptr( node_type& n ) + { + return reinterpret_cast( reinterpret_cast(&n) - Hook::c_nMemberOffset ); + } + static value_type * to_value_ptr( node_type * n ) + { + return n ? to_value_ptr(*n) : reinterpret_cast( NULL ); + } + static const value_type * to_value_ptr( const node_type& n ) + { + return reinterpret_cast( reinterpret_cast(&n) - Hook::c_nMemberOffset ); + } + static const value_type * to_value_ptr( const node_type * n ) + { + return n ? to_value_ptr(*n) : reinterpret_cast( NULL ); + } + }; + + template + struct node_traits: public Hook::node_traits + {}; + //@endcond + + /// Node traits selector metafunction + /** @ingroup cds_intrusive_helper + The metafunction selects appropriate \ref node_traits specialization based on value type \p T, node type \p NodeType, and hook type \p Hook. + */ + template + struct get_node_traits + { + //@cond + typedef node_traits type; + //@endcond + }; + + //@cond + /// Functor converting container's node type to value type + template + struct node_to_value { + typename Container::value_type * operator()( typename Container::node_type * p ) + { + typedef typename Container::node_traits node_traits; + return node_traits::to_value_ptr( p ); + } + }; + //@endcond + +}} // namespace cds::intrusuve + +#endif // #ifndef __CDS_INTRUSIVE_NODE_TRAITS_H diff --git a/cds/intrusive/optimistic_queue.h b/cds/intrusive/optimistic_queue.h new file mode 100644 index 00000000..eed476bc --- /dev/null +++ b/cds/intrusive/optimistic_queue.h @@ -0,0 +1,617 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_OPTIMISTIC_QUEUE_H +#define __CDS_INTRUSIVE_OPTIMISTIC_QUEUE_H + +#include +#include +#include +#include +#include +#include + +#include + +namespace cds { namespace intrusive { + + /// Optimistic queue related definitions + /** @ingroup cds_intrusive_helper + */ + namespace optimistic_queue { + + /// Optimistic queue node + /** + Template parameters: + - GC - garbage collector used. gc::HRC is not supported. 
+ - Tag - a tag used to distinguish between different implementation + */ + template + struct node: public GC::container_node + { + typedef GC gc ; ///< Garbage collector + typedef Tag tag ; ///< tag + + typedef typename gc::template atomic_ref atomic_node_ptr ; ///< atomic pointer + + atomic_node_ptr m_pPrev ; ///< Pointer to previous node + atomic_node_ptr m_pNext ; ///< Pointer to next node + + CDS_CONSTEXPR node() CDS_NOEXCEPT + : m_pPrev( null_ptr() ) + , m_pNext( null_ptr() ) + {} + }; + + //@cond + struct default_hook { + typedef cds::gc::default_gc gc; + typedef opt::none tag; + }; + //@endcond + + //@cond + template < typename HookType, CDS_DECL_OPTIONS2> + struct hook + { + typedef typename opt::make_options< default_hook, CDS_OPTIONS2>::type options; + typedef typename options::gc gc; + typedef typename options::tag tag; + typedef node node_type; + typedef HookType hook_type; + }; + //@endcond + + /// Base hook + /** + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - tag + */ + template < CDS_DECL_OPTIONS2 > + struct base_hook: public hook< opt::base_hook_tag, CDS_OPTIONS2 > + {}; + + /// Member hook + /** + \p MemberOffset defines offset in bytes of \ref node member into your structure. + Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - tag + */ + template < size_t MemberOffset, CDS_DECL_OPTIONS2 > + struct member_hook: public hook< opt::member_hook_tag, CDS_OPTIONS2 > + { + //@cond + static const size_t c_nMemberOffset = MemberOffset; + //@endcond + }; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - tag + */ + template + struct traits_hook: public hook< opt::traits_hook_tag, CDS_OPTIONS2 > + { + //@cond + typedef NodeTraits node_traits; + //@endcond + }; + + /// Check link + template + struct link_checker { + //@cond + typedef Node node_type; + //@endcond + + /// Checks if the link fields of node \p pNode is NULL + /** + An asserting is generated if \p pNode link fields is not NULL + */ + static void is_empty( const node_type * pNode ) + { + assert( pNode->m_pNext.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr() ); + assert( pNode->m_pPrev.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr() ); + } + }; + + /// Metafunction for selecting appropriate link checking policy + template < typename Node, opt::link_check_type LinkType > + struct get_link_checker + { + //@cond + typedef intrusive::opt::v::empty_link_checker type; + //@endcond + }; + + //@cond + template < typename Node > + struct get_link_checker< Node, opt::always_check_link > + { + typedef link_checker type; + }; + template < typename Node > + struct get_link_checker< Node, opt::debug_check_link > + { +# ifdef _DEBUG + typedef link_checker type; +# else + typedef intrusive::opt::v::empty_link_checker type; +# endif + }; + //@endcond + + /// OptimisticQueue internal statistics. May be used for debugging or profiling + /** + Template argument \p Counter defines type of counter. + Default is cds::atomicity::event_counter, that is weak, i.e. it is not guaranteed + strict event counting. + You may use stronger type of counter like as cds::atomicity::item_counter, + or even integral type, for example, \p int. + + The class extends intrusive::queue_stat interface for OptimisticQueue. 
+ */ + template + struct stat: public cds::intrusive::queue_stat + { + //@cond + typedef cds::intrusive::queue_stat base_class; + typedef typename base_class::counter_type counter_type; + //@endcond + + counter_type m_FixListCount ; ///< Count of fix list event + + /// Register fix list event + void onFixList() { ++m_FixListCount; } + + //@cond + void reset() + { + base_class::reset(); + m_FixListCount.reset(); + } + + stat& operator +=( stat const& s ) + { + base_class::operator +=( s ); + m_FixListCount += s.m_FixListCount.get(); + return *this; + } + //@endcond + }; + + /// Dummy OptimisticQueue statistics - no counting is performed. Support interface like \ref optimistic_queue::stat + struct dummy_stat: public cds::intrusive::queue_dummy_stat + { + //@cond + void onFixList() {} + + void reset() {} + dummy_stat& operator +=( dummy_stat const& ) + { + return *this; + } + //@endcond + }; + + } // namespace optimistic_queue + + /// Optimistic queue + /** @ingroup cds_intrusive_queue + Implementation of Ladan-Mozes & Shavit optimistic queue algorithm. + + \par Source: + [2008] Edya Ladan-Mozes, Nir Shavit "An Optimistic Approach to Lock-Free FIFO Queues" + + Template arguments: + - \p GC - garbage collector type: gc::HP, gc::PTB. Note that gc::HRC is not supported + - \p T - type to be stored in the queue + - \p Options - options + + Type of node: \ref optimistic_queue::node. + + \p Options are: + - opt::hook - hook used. Possible values are: optimistic_queue::base_hook, optimistic_queue::member_hook, optimistic_queue::traits_hook. + If the option is not specified, optimistic_queue::base_hook<> is used. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. + - opt::disposer - the functor used for dispose removed items. Default is opt::v::empty_disposer. This option is used + in \ref dequeue function. + - opt::link_checker - the type of node's link fields checking. Default is \ref opt::debug_check_link + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter + - opt::stat - the type to gather internal statistics. + Possible option value are: optimistic_queue::stat, optimistic_queue::dummy_stat, + user-provided class that supports optimistic_queue::stat interface. + Generic option intrusive::queue_stat and intrusive::queue_dummy_stat are acceptable too, however, + they will be automatically converted to optimistic_queue::stat and optimistic_queue::dummy_stat + respectively. + Default is \ref optimistic_queue::dummy_stat. + - opt::alignment - the alignment for internal queue data. Default is opt::cache_line_alignment + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + + Garbage collecting schema \p GC must be consistent with the optimistic_queue::node GC. + + \par About item disposing + The optimistic queue algo has a key feature: even if the queue is empty it contains one item that is "dummy" one from + the standpoint of the algo. See \ref dequeue function for explanation. + + \par Examples + \code + #include + #include + + namespace ci = cds::inrtusive; + typedef cds::gc::HP hp_gc; + + // Optimistic queue with Hazard Pointer garbage collector, base hook + item counter: + struct Foo: public ci::optimistic_queue::node< hp_gc > + { + // Your data + ... 
+ }; + + typedef ci::OptimisticQueue< hp_gc, + Foo + ,ci::opt::hook< + ci::optimistic_queue::base_hook< ci::opt::gc< hp_gc > > + > + ,cds::opt::item_counter< cds::atomicity::item_counter > + > FooQueue; + + // Optimistic queue with Hazard Pointer garbage collector, member hook, no item counter: + struct Bar + { + // Your data + ... + ci::optimistic_queue::node< hp_gc > hMember; + }; + + typedef ci::OptimisticQueue< hp_gc, + Bar + ,ci::opt::hook< + ci::optimistic_queue::member_hook< + offsetof(Bar, hMember) + ,ci::opt::gc< hp_gc > + > + > + > BarQueue; + + \endcode + */ + template + class OptimisticQueue + { + //@cond + struct default_options + { + typedef cds::backoff::empty back_off; + typedef optimistic_queue::base_hook<> hook; + typedef opt::v::empty_disposer disposer; + typedef atomicity::empty_item_counter item_counter; + typedef opt::v::relaxed_ordering memory_model; + typedef optimistic_queue::dummy_stat stat; + static const opt::link_check_type link_checker = opt::debug_check_link; + enum { alignment = opt::cache_line_alignment }; + }; + //@endcond + + public: + //@cond + typedef typename opt::make_options< + typename cds::opt::find_type_traits< default_options, CDS_OPTIONS9 >::type + ,CDS_OPTIONS9 + >::type options; + + typedef typename std::conditional< + std::is_same >::value + ,optimistic_queue::stat<> + ,typename std::conditional< + std::is_same::value + ,optimistic_queue::dummy_stat + ,typename options::stat + >::type + >::type stat_type_; + + //@endcond + + public: + typedef T value_type ; ///< type of value stored in the queue + typedef typename options::hook hook ; ///< hook type + typedef typename hook::node_type node_type ; ///< node type + typedef typename options::disposer disposer ; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + typedef typename optimistic_queue::get_link_checker< node_type, options::link_checker >::type link_checker ; ///< link checker + + typedef GC gc ; ///< Garbage collector + typedef typename options::back_off back_off ; ///< back-off strategy + typedef typename options::item_counter item_counter ; ///< Item counting policy used + typedef typename options::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option +#ifdef CDS_DOXYGEN_INVOKED + typedef typename options::stat stat ; ///< Internal statistics policy used +#else + typedef stat_type_ stat; +#endif + + /// Rebind template arguments + template + struct rebind { + typedef OptimisticQueue< GC2, T2, CDS_OTHER_OPTIONS9> other ; ///< Rebinding result + }; + + protected: + //@cond + + struct internal_disposer + { + void operator ()( value_type * p ) + { + assert( p != null_ptr()); + + OptimisticQueue::clear_links( node_traits::to_node_ptr(*p) ); + disposer()( p ); + } + }; + + typedef intrusive::node_to_value node_to_value; + typedef typename opt::details::alignment_setter< typename node_type::atomic_node_ptr, options::alignment >::type aligned_node_ptr; + //@endcond + + aligned_node_ptr m_pTail ; ///< Pointer to tail node + aligned_node_ptr m_pHead ; ///< Pointer to head node + node_type m_Dummy ; ///< dummy node + + item_counter m_ItemCounter ; ///< Item counter + stat m_Stat ; ///< Internal statistics + + static CDS_CONSTEXPR_CONST size_t c_nHazardPtrCount = 5 ; ///< Count of hazard pointer required for the algorithm + + protected: + //@cond + static void clear_links( node_type * pNode ) + { + pNode->m_pNext.store( null_ptr(), memory_model::memory_order_release ); + pNode->m_pPrev.store( null_ptr(), memory_model::memory_order_release ); + } + + struct dequeue_result { + typename gc::template GuardArray<3> guards; + + node_type * pHead; + node_type * pNext; + }; + + bool do_dequeue( dequeue_result& res ) + { + node_type * pTail; + node_type * pHead; + node_type * pFirstNodePrev; + back_off bkoff; + + while ( true ) { // Try till success or empty + pHead = res.guards.protect( 0, m_pHead, node_to_value() ); + pTail = res.guards.protect( 1, m_pTail, node_to_value() ); + assert( pHead != null_ptr() ); + pFirstNodePrev = res.guards.protect( 2, pHead->m_pPrev, node_to_value() ); + + if ( pHead == m_pHead.load(memory_model::memory_order_relaxed)) { + if ( pTail != pHead ) { + if ( pFirstNodePrev == null_ptr() + || pFirstNodePrev->m_pNext.load(memory_model::memory_order_relaxed) != pHead ) + { + fix_list( pTail, pHead ); + continue; + } + if ( m_pHead.compare_exchange_weak( pHead, pFirstNodePrev, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { + // dequeue success + break; + } + } + else { + // the queue is empty + return false; + } + } + + m_Stat.onDequeueRace(); + bkoff(); + } + + --m_ItemCounter; + m_Stat.onDequeue(); + + res.pHead = pHead; + res.pNext = pFirstNodePrev; + return true; + } + + + /// Helper function for optimistic queue. 
Corrects \p prev pointer of queue's nodes if it is needed + void fix_list( node_type * pTail, node_type * pHead ) + { + // pTail and pHead are already guarded + + node_type * pCurNode; + node_type * pCurNodeNext; + + typename gc::template GuardArray<2> guards; + + pCurNode = pTail; + while ( pCurNode != pHead ) { // While not at head + pCurNodeNext = guards.protect(0, pCurNode->m_pNext, node_to_value() ); + if ( pHead != m_pHead.load(memory_model::memory_order_relaxed) ) + break; + pCurNodeNext->m_pPrev.store( pCurNode, memory_model::memory_order_release ); + guards.assign( 1, node_traits::to_value_ptr( pCurNode = pCurNodeNext )); + } + + m_Stat.onFixList(); + } + + void dispose_result( dequeue_result& res ) + { + dispose_node( res.pHead ); + } + + void dispose_node( node_type * p ) + { + assert( p != null_ptr()); + + if ( p != &m_Dummy ) { + gc::template retire( node_traits::to_value_ptr(p) ); + } + } + + //@endcond + + public: + /// Constructor creates empty queue + OptimisticQueue() + : m_pTail( null_ptr() ) + , m_pHead( null_ptr() ) + { + // GC and node_type::gc must be the same + static_assert(( std::is_same::value ), "GC and node_type::gc must be the same"); + + // cds::gc::HRC is not allowed + static_assert(( !std::is_same::value ), "cds::gc::HRC is not allowed here"); + + m_pTail.store( &m_Dummy, memory_model::memory_order_relaxed ); + m_pHead.store( &m_Dummy, memory_model::memory_order_relaxed ); + } + + ~OptimisticQueue() + { + clear(); + node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed); + CDS_DEBUG_DO( node_type * pTail = m_pTail.load(memory_model::memory_order_relaxed); ) + CDS_DEBUG_DO( assert( pHead == pTail ); ) + assert( pHead != null_ptr() ); + + m_pHead.store( null_ptr(), memory_model::memory_order_relaxed ); + m_pTail.store( null_ptr(), memory_model::memory_order_relaxed ); + + dispose_node( pHead ); + } + + /// @anchor cds_intrusive_OptimisticQueue_enqueue Enqueues \p data in lock-free manner. Always return \a true + bool enqueue( value_type& val ) + { + node_type * pNew = node_traits::to_node_ptr( val ); + link_checker::is_empty( pNew ); + + typename gc::template GuardArray<2> guards; + back_off bkoff; + + guards.assign( 1, &val ); + node_type * pTail = guards.protect( 0, m_pTail, node_to_value() ) ; // Read the tail + while( true ) { + pNew->m_pNext.store( pTail, memory_model::memory_order_release ); + if ( m_pTail.compare_exchange_strong( pTail, pNew, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) { // Try to CAS the tail + pTail->m_pPrev.store( pNew, memory_model::memory_order_release ) ; // Success, write prev + ++m_ItemCounter; + m_Stat.onEnqueue(); + break ; // Enqueue done! + } + guards.assign( 0, node_traits::to_value_ptr( pTail ) ) ; // pTail has been changed by CAS above + m_Stat.onEnqueueRace(); + bkoff(); + } + return true; + } + + /// Dequeues a value from the queue + /** @anchor cds_intrusive_OptimisticQueue_dequeue + If the queue is empty the function returns \a NULL + + \par Warning + The queue algorithm has following feature: when \p dequeue is called, + the item returning is still queue's top, and previous top is disposed: + + \code + before dequeuing Dequeue after dequeuing + +------------------+ +------------------+ + Top ->| Item 1 | -> Dispose Item 1 | Item 2 | <- Top + +------------------+ +------------------+ + | Item 2 | -> Return Item 2 | ... | + +------------------+ + | ... 
| + \endcode + + \p dequeue function returns Item 2, that becomes new top of queue, and calls + the disposer for Item 1, that was queue's top on function entry. + Thus, you cannot manually delete item returned because it is still included in + item sequence and it has valuable link field that must not be zeroed. + The item may be deleted only in disposer call. + */ + value_type * dequeue() + { + dequeue_result res; + if ( do_dequeue( res )) { + dispose_result( res ); + + return node_traits::to_value_ptr( *res.pNext ); + } + return null_ptr(); + } + + /// Synonym for @ref cds_intrusive_OptimisticQueue_enqueue "enqueue" + bool push( value_type& val ) + { + return enqueue( val ); + } + + /// Synonym for \ref cds_intrusive_OptimisticQueue_dequeue "dequeue" + value_type * pop() + { + return dequeue(); + } + + /// Checks if queue is empty + bool empty() const + { + return m_pTail.load(memory_model::memory_order_relaxed) == m_pHead.load(memory_model::memory_order_relaxed); + } + + /// Clear the stack + /** + The function repeatedly calls \ref dequeue until it returns NULL. + The disposer defined in template \p Options is called for each item + that can be safely disposed. + */ + void clear() + { + value_type * pv; + while ( (pv = dequeue()) != null_ptr() ); + } + + /// Returns queue's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the queue + is empty. To check queue emptyness use \ref empty() method. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns refernce to internal statistics + const stat& statistics() const + { + return m_Stat; + } + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_OPTIMISTIC_QUEUE_H diff --git a/cds/intrusive/options.h b/cds/intrusive/options.h new file mode 100644 index 00000000..5478b797 --- /dev/null +++ b/cds/intrusive/options.h @@ -0,0 +1,161 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_OPTIONS_H +#define __CDS_INTRUSIVE_OPTIONS_H + +#include +#include + +namespace cds { namespace intrusive { + + /// Common options for intrusive containers + /** @ingroup cds_intrusive_helper + This namespace contains options for intrusive containers. + It imports all definitions from cds::opt namespace and introduces a lot + of options specific for intrusive approach. + */ + namespace opt { + using namespace cds::opt; + + //@cond + struct base_hook_tag; + struct member_hook_tag; + struct traits_hook_tag; + //@endcond + + /// Hook option + /** + Hook is a class that a user must add as a base class or as a member to make the user class compatible with intrusive containers. + \p Hook template parameter strongly depends on the type of intrusive container you use. + */ + template + struct hook { + //@cond + template struct pack: public Base + { + typedef Hook hook; + }; + //@endcond + }; + + /// Item disposer option setter + /** + The option specifies a functor that is used for dispose removed items. + The interface of \p Type functor is: + \code + struct myDisposer { + void operator ()( T * val ); + }; + \endcode + + Predefined types for \p Type: + - opt::v::empty_disposer - the disposer that does nothing + - opt::v::delete_disposer - the disposer that calls operator \p delete + + Usually, the disposer should be stateless default-constructible functor. + It is called by garbage collector in deferred mode. 
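+
+            For example, to delete removed items with operator \p delete, the predefined
+            \p opt::v::delete_disposer could be passed to a container (a sketch only; the
+            container and its other template arguments are illustrative, and the default
+            allocator parameter of \p delete_disposer is assumed):
+            \code
+            typedef cds::intrusive::MSQueue< cds::gc::HP, Foo
+                ,cds::intrusive::opt::disposer< cds::intrusive::opt::v::delete_disposer<> >
+            > foo_queue;
+            \endcode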
+ */ + template + struct disposer { + //@cond + template struct pack: public Base + { + typedef Type disposer; + }; + //@endcond + }; + + /// Values of \ref cds::intrusive::opt::link_checker option + enum link_check_type { + never_check_link, ///< no link checking performed + debug_check_link, ///< check only in debug build + always_check_link ///< check in debug and release build + }; + + /// Link checking + /** + The option specifies a type of link checking. + Possible values for \p Value are is one of \ref link_check_type enum: + - \ref never_check_link - no link checking performed + - \ref debug_check_link - check only in debug build + - \ref always_check_link - check in debug and release build (not yet implemented for release mode). + + When link checking is on, the container tests that the node's link fields + must be NULL before inserting the item. If the link is not NULL an assertion is generated + */ + template + struct link_checker { + //@cond + template struct pack: public Base + { + static const link_check_type link_checker = Value; + }; + //@endcond + }; + + /// Predefined option values + namespace v { + using namespace cds::opt::v; + + //@cond + /// No link checking + template + struct empty_link_checker + { + //@cond + typedef Node node_type; + + static void is_empty( const node_type * pNode ) + {} + //@endcond + }; + //@endcond + + /// Empty item disposer + /** + The disposer does nothing. + This is one of possible values of opt::disposer option. + */ + struct empty_disposer + { + /// Empty dispose functor + template + void operator ()( T * ) + {} + }; + + /// Deletion item disposer + /** + Analogue of operator \p delete call. + The disposer that calls \p T destructor and deallocates the item via \p Alloc allocator. + */ + template + struct delete_disposer + { + /// Dispose functor + template + void operator ()( T * p ) + { + cds::details::Allocator alloc; + alloc.Delete( p ); + } + }; + } // namespace v + + //@cond + // Lazy-list specific option (for split-list support) + template + struct boundary_node_type { + //@cond + template struct pack: public Base + { + typedef Type boundary_node_type; + }; + //@endcond + }; + //@endcond + } // namespace opt + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_OPTIONS_H diff --git a/cds/intrusive/queue_stat.h b/cds/intrusive/queue_stat.h new file mode 100644 index 00000000..cd71314f --- /dev/null +++ b/cds/intrusive/queue_stat.h @@ -0,0 +1,93 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_QUEUE_STAT_H +#define __CDS_INTRUSIVE_QUEUE_STAT_H + +#include + +namespace cds { namespace intrusive { + + /// Queue internal statistics. May be used for debugging or profiling + /** @ingroup cds_intrusive_helper + Template argument \p Counter defines type of counter. + Default is cds::atomicity::event_counter, that is weak, i.e. it is not guaranteed + strict event counting. + You may use stronger type of counter like as cds::atomicity::item_counter, + or even integral type, for example, \p int. 
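+
+        For example, exact (non-racy) event counting might be requested by overriding the
+        counter type (a sketch; the resulting type is then passed to a queue via the
+        \p opt::stat option):
+        \code
+        typedef cds::intrusive::queue_stat< cds::atomicity::item_counter > exact_queue_stat;
+        \endcode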
+ */ + template + struct queue_stat + { + typedef Counter counter_type ; ///< Counter type + + counter_type m_EnqueueCount ; ///< Enqueue call count + counter_type m_DequeueCount ; ///< Dequeue call count + counter_type m_EnqueueRace ; ///< Count of enqueue race conditions encountered + counter_type m_DequeueRace ; ///< Count of dequeue race conditions encountered + counter_type m_AdvanceTailError ; ///< Count of "advance tail failed" events + counter_type m_BadTail ; ///< Count of events "Tail is not pointed to the last item in the queue" + + /// Register enqueue call + void onEnqueue() { ++m_EnqueueCount; } + /// Register dequeue call + void onDequeue() { ++m_DequeueCount; } + /// Register enqueue race event + void onEnqueueRace() { ++m_EnqueueRace; } + /// Register dequeue race event + void onDequeueRace() { ++m_DequeueRace; } + /// Register "advance tail failed" event + void onAdvanceTailFailed() { ++m_AdvanceTailError; } + /// Register event "Tail is not pointed to last item in the queue" + void onBadTail() { ++m_BadTail; } + + //@cond + void reset() + { + m_EnqueueCount.reset(); + m_DequeueCount.reset(); + m_EnqueueRace.reset(); + m_DequeueRace.reset(); + m_AdvanceTailError.reset(); + m_BadTail.reset(); + } + + queue_stat& operator +=( queue_stat const& s ) + { + m_EnqueueCount += s.m_EnqueueCount.get(); + m_DequeueCount += s.m_DequeueCount.get(); + m_EnqueueRace += s.m_EnqueueRace.get(); + m_DequeueRace += s.m_DequeueRace.get(); + m_AdvanceTailError += s.m_AdvanceTailError.get(); + m_BadTail += s.m_BadTail.get(); + + return *this; + } + //@endcond + }; + + /// Dummy queue statistics - no counting is performed. Support interface like \ref queue_stat + /** @ingroup cds_intrusive_helper + */ + struct queue_dummy_stat + { + //@cond + void onEnqueue() {} + void onDequeue() {} + void onEnqueueRace() {} + void onDequeueRace() {} + void onAdvanceTailFailed() {} + void onBadTail() {} + + void reset() {} + queue_dummy_stat& operator +=( queue_dummy_stat const& s ) + { + return *this; + } + //@endcond + }; + + +}} // namespace cds::intrusive + + +#endif // #ifndef __CDS_INTRUSIVE_QUEUE_STAT_H diff --git a/cds/intrusive/segmented_queue.h b/cds/intrusive/segmented_queue.h new file mode 100644 index 00000000..0900d9f1 --- /dev/null +++ b/cds/intrusive/segmented_queue.h @@ -0,0 +1,681 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_SEGMENTED_QUEUE_H +#define __CDS_INTRUSIVE_SEGMENTED_QUEUE_H + +#include +#include +#include +#include +#include +#include + +#include + +#if CDS_COMPILER == CDS_COMPILER_MSVC +# pragma warning( push ) +# pragma warning( disable: 4355 ) // warning C4355: 'this' : used in base member initializer list +#endif + +namespace cds { namespace intrusive { + + /// SegmentedQueue -related declarations + namespace segmented_queue { + + /// SegmentedQueue internal statistics. 
May be used for debugging or profiling + template + struct stat { + typedef Counter counter_type; ///< Counter type + + counter_type m_nPush; ///< Push count + counter_type m_nPushPopulated; ///< Number of attempts to push to populated (non-empty) cell + counter_type m_nPushContended; ///< Number of failed CAS when pushing + counter_type m_nPop; ///< Pop count + counter_type m_nPopEmpty; ///< Number of dequeuing from empty queue + counter_type m_nPopContended; ///< Number of failed CAS when popping + + counter_type m_nCreateSegmentReq; ///< Number of request to create new segment + counter_type m_nDeleteSegmentReq; ///< Number to request to delete segment + counter_type m_nSegmentCreated; ///< Number of created segments + counter_type m_nSegmentDeleted; ///< Number of deleted segments + + //@cond + void onPush() { ++m_nPush; } + void onPushPopulated() { ++m_nPushPopulated; } + void onPushContended() { ++m_nPushContended; } + void onPop() { ++m_nPop; } + void onPopEmpty() { ++m_nPopEmpty; } + void onPopContended() { ++m_nPopContended; } + void onCreateSegmentReq() { ++m_nCreateSegmentReq; } + void onDeleteSegmentReq() { ++m_nDeleteSegmentReq; } + void onSegmentCreated() { ++m_nSegmentCreated; } + void onSegmentDeleted() { ++m_nSegmentDeleted; } + //@endcond + }; + + /// Dummy SegmentedQueue statistics, no overhead + struct empty_stat { + //@cond + void onPush() const {} + void onPushPopulated() const {} + void onPushContended() const {} + void onPop() const {} + void onPopEmpty() const {} + void onPopContended() const {} + void onCreateSegmentReq() const {} + void onDeleteSegmentReq() const {} + void onSegmentCreated() const {} + void onSegmentDeleted() const {} + //@endcond + }; + + /// SegmentedQueue default type traits + struct type_traits { + /// Element disposer that is called when the item to be dequeued. Default is opt::v::empty_disposer (no disposer) + typedef opt::v::empty_disposer disposer; + + /// Item counter, default is atomicity::item_counter + /** + The item counting is an essential part of segmented queue algorithm. + The \p empty() member function is based on checking size() == 0. + Therefore, dummy item counter like atomicity::empty_item_counter is not the proper counter. + */ + typedef atomicity::item_counter item_counter; + + /// Internal statistics, possible predefined types are \ref stat, \ref empty_stat (the default) + typedef segmented_queue::empty_stat stat; + + /// Memory model, default is opt::v::relaxed_ordering. See cds::opt::memory_model for the full list of possible types + typedef opt::v::relaxed_ordering memory_model; + + /// Alignment of critical data, default is cache line alignment. See cds::opt::alignment option specification + enum { alignment = opt::cache_line_alignment }; + + /// Segment allocator. Default is \ref CDS_DEFAULT_ALLOCATOR + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Lock type used to maintain an internal list of allocated segments + typedef cds::lock::Spin lock_type; + + /// Random \ref cds::opt::permutation_generator "permutation generator" for sequence [0, quasi_factor) + typedef cds::opt::v::random2_permutation permutation_generator; + }; + + /// Metafunction converting option list to traits for SegmentedQueue + /** + The metafunction can be useful if a few fields in \ref type_traits should be changed. 
+ For example: + \code + typedef cds::intrusive::segmented_queue::make_traits< + cds::opt::item_counter< cds::atomicity::item_counter > + >::type my_segmented_queue_traits; + \endcode + This code creates \p %SegmentedQueue type traits with item counting feature, + all other \p type_traits members left unchanged. + + \p Options are: + - \p opt::disposer - the functor used for dispose removed items. + - \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default) + - \p opt::item_counter - item counting feature. Note that atomicity::empty_item_counetr is not suitable + for segmented queue. + - \p opt::memory_model - memory model, default is \p opt::v::relaxed_ordering. + See option description for the full list of possible models + - \p opt::alignment - the alignment for critical data, see option description for explanation + - \p opt::allocator - the allocator used t maintain segments. + - \p opt::lock_type - a mutual exclusion lock type used to maintain internal list of allocated + segments. Default is \p cds::opt::Spin, \p std::mutex is also suitable. + - \p opt::permutation_generator - a random permutation generator for sequence [0, quasi_factor), + default is cds::opt::v::random2_permutation + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS9 >::type + ,CDS_OPTIONS9 + >::type type; +# endif + }; + } // namespace segmented_queue + + /// Segmented queue + /** @ingroup cds_intrusive_queue + + The queue is based on work + - [2010] Afek, Korland, Yanovsky "Quasi-Linearizability: relaxed consistency for improved concurrency" + + In this paper the authors offer a relaxed version of linearizability, so-called quasi-linearizability, + that preserves some of the intuition, provides a flexible way to control the level of relaxation + and supports th implementation of more concurrent and scalable data structure. + Intuitively, the linearizability requires each run to be equivalent in some sense to a serial run + of the algorithm. This equivalence to some serial run imposes strong synchronization requirements + that in many cases results in limited scalability and synchronization bottleneck. + + The general idea is that the queue maintains a linked list of segments, each segment is an array of + nodes in the size of the quasi factor, and each node has a deleted boolean marker, which states + if it has been dequeued. Each producer iterates over last segment in the linked list in some random + permutation order. Whet it finds an empty cell it performs a CAS operation attempting to enqueue its + new element. In case the entire segment has been scanned and no available cell is found (implying + that the segment is full), then it attempts to add a new segment to the list. + + The dequeue operation is similar: the consumer iterates over the first segment in the linked list + in some random permutation order. When it finds an item which has not yet been dequeued, it performs + CAS on its deleted marker in order to "delete" it, if succeeded this item is considered dequeued. + In case the entire segment was scanned and all the nodes have already been dequeued (implying that + the segment is empty), then it attempts to remove this segment from the linked list and starts + the same process on the next segment. If there is no next segment, the queue is considered empty. 
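+
+        Putting it together, a minimal declaration/usage sketch (assuming Hazard Pointer GC;
+        \p Foo is a hypothetical user type - the queue keeps bare pointers to it, so no node
+        hook is needed):
+        \code
+        struct Foo { int nData; };
+
+        typedef cds::intrusive::SegmentedQueue< cds::gc::HP, Foo > foo_queue;
+
+        foo_queue q( 16 );          // quasi factor, rounded up to a power of 2
+        Foo * pItem = new Foo;
+        q.enqueue( *pItem );        // the queue stores a pointer to the item
+        Foo * p = q.dequeue();      // returns NULL if the queue is empty
+        // p must be disposed manually, e.g. via gc::HP::retire, see dequeue() documentation
+        \endcode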
+ + Based on the fact that most of the time threads do not add or remove segments, most of the work + is done in parallel on different cells in the segments. This ensures a controlled contention + depending on the segment size, which is quasi factor. + + The segmented queue is an unfair queue since it violates the strong FIFO order but no more than + quasi factor. This means that the consumer dequeues any item from the current first segment. + + Template parameters: + - \p GC - a garbage collector, possible types are cds::gc::HP, cds::gc::PTB + - \p T - the type of values stored in the queue + - \p Traits - queue type traits, default is segmented_queue::type_traits. + segmented_queue::make_traits metafunction can be used to construct the + type traits. + + The queue stores the pointers to enqueued items so no special node hooks are needed. + */ + template + class SegmentedQueue + { + public: + typedef GC gc ; ///< Garbage collector + typedef T value_type ; ///< type of the value stored in the queue + typedef Traits options ; ///< Queue's traits + + typedef typename options::disposer disposer ; ///< value disposer, called only in \p clear() when the element to be dequeued + typedef typename options::allocator allocator ; ///< Allocator maintaining the segments + typedef typename options::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option + typedef typename options::item_counter item_counter; ///< Item counting policy, see cds::opt::item_counter option setter + typedef typename options::stat stat ; ///< Internal statistics policy + typedef typename options::lock_type lock_type ; ///< Type of mutex for maintaining an internal list of allocated segments. + typedef typename options::permutation_generator permutation_generator; ///< Random permutation generator for sequence [0, quasi-factor) + + static const size_t m_nHazardPtrCount = 2 ; ///< Count of hazard pointer required for the algorithm + + protected: + //@cond + // Segment cell. 
LSB is used as deleted mark + typedef cds::details::marked_ptr< value_type, 1 > cell; + + // Segment + struct segment: public boost::intrusive::slist_base_hook<> + { + CDS_ATOMIC::atomic< cell > * cells; // Cell array of size \ref m_nQuasiFactor + size_t version; // version tag (ABA prevention tag) + // cell array is placed here in one continuous memory block + + // Initializes the segment + segment( size_t nCellCount ) + // MSVC warning C4355: 'this': used in base member initializer list + : cells( reinterpret_cast< CDS_ATOMIC::atomic< cell > * >( this + 1 )) + , version( 0 ) + { + init( nCellCount ); + } + + void init( size_t nCellCount ) + { + CDS_ATOMIC::atomic< cell > * pLastCell = cells + nCellCount; + for ( CDS_ATOMIC::atomic< cell > * pCell = cells; pCell < pLastCell; ++pCell ) + pCell->store( cell(), CDS_ATOMIC::memory_order_relaxed ); + CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release ); + } + + private: + segment(); //=delete + }; + + typedef typename opt::details::alignment_setter< CDS_ATOMIC::atomic, options::alignment >::type aligned_segment_ptr; + //@endcond + + protected: + //@cond + class segment_list + { + typedef boost::intrusive::slist< segment, boost::intrusive::cache_last< true > > list_impl; + typedef cds_std::unique_lock< lock_type > scoped_lock; + + aligned_segment_ptr m_pHead; + aligned_segment_ptr m_pTail; + + list_impl m_List; + mutable lock_type m_Lock; + size_t const m_nQuasiFactor; + stat& m_Stat; + + private: + struct segment_disposer + { + void operator()( segment * pSegment ) + { + assert( pSegment != null_ptr()); + free_segment( pSegment ); + } + }; + + struct gc_segment_disposer + { + void operator()( segment * pSegment ) + { + assert( pSegment != null_ptr()); + retire_segment( pSegment ); + } + }; + + public: + segment_list( size_t nQuasiFactor, stat& st ) + : m_pHead( null_ptr() ) + , m_pTail( null_ptr() ) + , m_nQuasiFactor( nQuasiFactor ) + , m_Stat( st ) + { + assert( cds::beans::is_power2( nQuasiFactor )); + } + + ~segment_list() + { + m_List.clear_and_dispose( gc_segment_disposer() ); + } + + segment * head( typename gc::Guard& guard ) + { + return guard.protect( m_pHead ); + } + + segment * tail( typename gc::Guard& guard ) + { + return guard.protect( m_pTail ); + } + +# ifdef _DEBUG + bool populated( segment const& s ) const + { + // The lock should be held + CDS_ATOMIC::atomic< cell > const * pLastCell = s.cells + quasi_factor(); + for ( CDS_ATOMIC::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) { + if ( !pCell->load( memory_model::memory_order_relaxed ).all() ) + return false; + } + return true; + } + bool exhausted( segment const& s ) const + { + // The lock should be held + CDS_ATOMIC::atomic< cell > const * pLastCell = s.cells + quasi_factor(); + for ( CDS_ATOMIC::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) { + if ( !pCell->load( memory_model::memory_order_relaxed ).bits() ) + return false; + } + return true; + } +# endif + + segment * create_tail( segment * pTail, typename gc::Guard& guard ) + { + // pTail is guarded by GC + + m_Stat.onCreateSegmentReq(); + + scoped_lock l( m_Lock ); + + if ( !m_List.empty() && ( pTail != &m_List.back() || get_version(pTail) != m_List.back().version )) { + m_pTail.store( &m_List.back(), memory_model::memory_order_relaxed ); + + return guard.assign( &m_List.back() ); + } + + assert( m_List.empty() || populated( m_List.back() )); + + segment * pNew = allocate_segment(); + m_Stat.onSegmentCreated(); + + if ( m_List.empty() ) + m_pHead.store( 
pNew, memory_model::memory_order_relaxed ); + m_List.push_back( *pNew ); + m_pTail.store( pNew, memory_model::memory_order_release ); + return guard.assign( pNew ); + } + + segment * remove_head( segment * pHead, typename gc::Guard& guard ) + { + // pHead is guarded by GC + m_Stat.onDeleteSegmentReq(); + + segment * pRet; + { + scoped_lock l( m_Lock ); + + if ( m_List.empty() ) { + m_pTail.store( null_ptr(), memory_model::memory_order_relaxed ); + m_pHead.store( null_ptr(), memory_model::memory_order_relaxed ); + return guard.assign( null_ptr() ); + } + + if ( pHead != &m_List.front() || get_version(pHead) != m_List.front().version ) { + m_pHead.store( &m_List.front(), memory_model::memory_order_relaxed ); + return guard.assign( &m_List.front() ); + } + + assert( exhausted(m_List.front()) ); + + m_List.pop_front(); + if ( m_List.empty() ) { + pRet = guard.assign( null_ptr() ); + m_pTail.store( null_ptr(), memory_model::memory_order_relaxed ); + } + else + pRet = guard.assign( &m_List.front() ); + m_pHead.store( pRet, memory_model::memory_order_release ); + } + + retire_segment( pHead ); + m_Stat.onSegmentDeleted(); + + return pRet; + } + + size_t quasi_factor() const + { + return m_nQuasiFactor; + } + + private: + typedef cds::details::Allocator< segment, allocator > segment_allocator; + + static size_t get_version( segment * pSegment ) + { + return pSegment ? pSegment->version : 0; + } + + segment * allocate_segment() + { + return segment_allocator().NewBlock( sizeof(segment) + sizeof(cell) * m_nQuasiFactor, + quasi_factor() ); + } + + static void free_segment( segment * pSegment ) + { + segment_allocator().Delete( pSegment ); + } + + static void retire_segment( segment * pSegment ) + { + gc::template retire( pSegment ); + } + }; + //@endcond + + protected: + segment_list m_SegmentList; ///< List of segments + + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics + + public: + /// Initializes the empty queue + SegmentedQueue( + size_t nQuasiFactor ///< Quasi factor. If it is not a power of 2 it is rounded up to nearest power of 2. Minimum is 2. + ) + : m_SegmentList( cds::beans::ceil2(nQuasiFactor), m_Stat ) + { + static_assert( (!std::is_same< item_counter, cds::atomicity::empty_item_counter >::value), + "cds::atomicity::empty_item_counter is not supported for SegmentedQueue" + ); + assert( m_SegmentList.quasi_factor() > 1 ); + } + + /// Clears the queue and deletes all internal data + ~SegmentedQueue() + { + clear(); + } + + /// Inserts a new element at last segment of the queue + bool enqueue( value_type& val ) + { + // LSB is used as a flag in marked pointer + assert( (reinterpret_cast( &val ) & 1) == 0 ); + + typename gc::Guard segmentGuard; + segment * pTailSegment = m_SegmentList.tail( segmentGuard ); + if ( !pTailSegment ) { + // no segments, create the new one + pTailSegment = m_SegmentList.create_tail( pTailSegment, segmentGuard ); + assert( pTailSegment ); + } + + permutation_generator gen( quasi_factor() ); + + // First, increment item counter. 
+ // We sure that the item will be enqueued + // but if we increment the counter after inserting we can get a negative counter value + // if dequeuing occurs before incrementing (enqueue/dequeue race) + ++m_ItemCounter; + + while ( true ) { + CDS_DEBUG_DO( size_t nLoopCount = 0); + do { + typename permutation_generator::integer_type i = gen; + CDS_DEBUG_DO( ++nLoopCount ); + if ( pTailSegment->cells[i].load(memory_model::memory_order_relaxed).all() ) { + // Cell is not empty, go next + m_Stat.onPushPopulated(); + } + else { + // Empty cell found, try to enqueue here + cell nullCell; + if ( pTailSegment->cells[i].compare_exchange_strong( nullCell, cell( &val ), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + { + // Ok to push item + m_Stat.onPush(); + return true; + } + assert( nullCell.ptr() ); + m_Stat.onPushContended(); + } + } while ( gen.next() ); + + assert( nLoopCount == quasi_factor()); + + // No available position, create a new segment + pTailSegment = m_SegmentList.create_tail( pTailSegment, segmentGuard ); + + // Get new permutation + gen.reset(); + } + } + + /// Removes an element from first segment of the queue and returns it + /** + If the queue is empty the function returns \p nullptr. + + The disposer specified in \p Traits template argument is not called for returned item. + You should manually dispose the item: + + struct my_disposer { + void operator()( foo * p ) + { + delete p; + } + }; + cds::intrusive::SegmentedQueue< cds::gc::HP, foo > theQueue; + // ... + + // Dequeue an item + foo * pItem = theQueue.dequeue(); + // deal with pItem + //... + + // pItem is not longer needed and can be deleted + // Do it via gc::HP::retire + cds::gc::HP::template retire< my_disposer >( pItem ); + + */ + value_type * dequeue() + { + typename gc::Guard itemGuard; + if ( do_dequeue( itemGuard )) { + value_type * pVal = itemGuard.template get(); + assert( pVal ); + return pVal; + } + return null_ptr(); + + } + + /// Synonym for \p enqueue(value_type&) member function + bool push( value_type& val ) + { + return enqueue( val ); + } + + /// Synonym for \p dequeue() member function + value_type * pop() + { + return dequeue(); + } + + /// Checks if the queue is empty + /** + The original segmented queue algorithm does not allow to check emptiness accurately + because \p empty() is unlinearizable. + This function tests queue's emptiness checking size() == 0, + so, the item counting feature is an essential part of queue's algorithm. + */ + bool empty() const + { + return size() == 0; + } + + /// Clear the queue + /** + The function repeatedly calls \ref dequeue until it returns \p nullptr. + The disposer specified in \p Traits template argument is called for each removed item. + */ + void clear() + { + clear_with( disposer() ); + } + + /// Clear the queue + /** + The function repeatedly calls \p dequeue() until it returns \p nullptr. + \p Disposer is called for each removed item. + */ + template + void clear_with( Disposer ) + { + typename gc::Guard itemGuard; + while ( do_dequeue( itemGuard ) ) { + assert( itemGuard.template get() ); + gc::template retire( itemGuard.template get() ); + itemGuard.clear(); + } + } + + /// Returns queue's item count + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns reference to internal statistics + /** + The type of internal statistics is specified by \p Traits template argument. 
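+
+            For example, when \p Traits selects \p segmented_queue::stat<>, the counters of a
+            queue instance \p q might be read like this (a sketch; the default \p empty_stat
+            has no counters):
+            \code
+            std::cout << "pushes: " << q.statistics().m_nPush.get() << "\n";
+            \endcode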
+ */ + const stat& statistics() const + { + return m_Stat; + } + + /// Returns quasi factor, a power-of-two number + size_t quasi_factor() const + { + return m_SegmentList.quasi_factor(); + } + + protected: + //@cond + bool do_dequeue( typename gc::Guard& itemGuard ) + { + typename gc::Guard segmentGuard; + segment * pHeadSegment = m_SegmentList.head( segmentGuard ); + + permutation_generator gen( quasi_factor() ); + while ( true ) { + if ( !pHeadSegment ) { + // Queue is empty + m_Stat.onPopEmpty(); + return false; + } + + bool bHadNullValue = false; + cell item; + CDS_DEBUG_DO( size_t nLoopCount = 0 ); + do { + typename permutation_generator::integer_type i = gen; + CDS_DEBUG_DO( ++nLoopCount ); + + // Guard the item + // In segmented queue the cell cannot be reused + // So no loop is needed here to protect the cell + item = pHeadSegment->cells[i].load( memory_model::memory_order_relaxed ); + itemGuard.assign( item.ptr() ); + + // Check if this cell is empty, which means an element + // can be enqueued to this cell in the future + if ( !item.ptr() ) + bHadNullValue = true; + else { + // If the item is not deleted yet + if ( !item.bits() ) { + // Try to mark the cell as deleted + if ( pHeadSegment->cells[i].compare_exchange_strong( item, item | 1, + memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + { + --m_ItemCounter; + m_Stat.onPop(); + + return true; + } + assert( item.bits() ); + m_Stat.onPopContended(); + } + } + } while ( gen.next() ); + + assert( nLoopCount == quasi_factor() ); + + // scanning the entire segment without finding a candidate to dequeue + // If there was an empty cell, the queue is considered empty + if ( bHadNullValue ) { + m_Stat.onPopEmpty(); + return false; + } + + // All nodes have been dequeued, we can safely remove the first segment + pHeadSegment = m_SegmentList.remove_head( pHeadSegment, segmentGuard ); + + // Get new permutation + gen.reset(); + } + } + //@endcond + }; +}} // namespace cds::intrusive + +#if CDS_COMPILER == CDS_COMPILER_MSVC +# pragma warning( pop ) +#endif + +#endif // #ifndef __CDS_INTRUSIVE_SEGMENTED_QUEUE_H diff --git a/cds/intrusive/single_link_struct.h b/cds/intrusive/single_link_struct.h new file mode 100644 index 00000000..c17f342e --- /dev/null +++ b/cds/intrusive/single_link_struct.h @@ -0,0 +1,227 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_SINGLE_LINK_STRUCT_H +#define __CDS_INTRUSIVE_SINGLE_LINK_STRUCT_H + +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// Definitions common for single-linked data structures + /** @ingroup cds_intrusive_helper + */ + namespace single_link { + + /// Container's node + /** + Template parameters: + - GC - garbage collector used + - Tag - a tag used to distinguish between different implementation + */ + template + struct node: public GC::container_node + { + typedef GC gc ; ///< Garbage collector + typedef Tag tag ; ///< tag + + typedef typename gc::template atomic_ref atomic_node_ptr ; ///< atomic pointer + + /// Rebind node for other template parameters + template + struct rebind { + typedef node other ; ///< Rebinding result + }; + + atomic_node_ptr m_pNext ; ///< pointer to the next node in the container + + node() + : m_pNext( null_ptr() ) + {} + }; + + //@cond + // Specialization for HRC GC + template + struct node< gc::HRC, Tag>: public gc::HRC::container_node + { + typedef gc::HRC gc ; ///< Garbage collector + typedef Tag tag ; ///< tag + + typedef gc::atomic_ref atomic_node_ptr ; ///< atomic pointer + atomic_node_ptr m_pNext ; 
///< pointer to the next node in the container + + node() + : m_pNext(null_ptr()) + {} + + protected: + virtual void cleanUp( cds::gc::hrc::ThreadGC * pGC ) + { + assert( pGC != null_ptr() ); + typename gc::GuardArray<2> aGuards( *pGC ); + + while ( true ) { + node * pNext = aGuards.protect( 0, m_pNext ); + if ( pNext && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) { + node * p = aGuards.protect( 1, pNext->m_pNext ); + m_pNext.compare_exchange_strong( pNext, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ); + continue; + } + else { + break; + } + } + } + + virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent ) + { + if ( bConcurrent ) { + node * pNext = m_pNext.load(CDS_ATOMIC::memory_order_relaxed); + do {} while ( !m_pNext.compare_exchange_weak( pNext, null_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ); + } + else { + m_pNext.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + } + } + }; + //@endcond + + //@cond + struct default_hook { + typedef cds::gc::default_gc gc; + typedef opt::none tag; + }; + //@endcond + + //@cond + template < typename HookType, CDS_DECL_OPTIONS2> + struct hook + { + typedef typename opt::make_options< default_hook, CDS_OPTIONS2>::type options; + typedef typename options::gc gc; + typedef typename options::tag tag; + typedef node node_type; + typedef HookType hook_type; + }; + //@endcond + + + /// Base hook + /** + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - tag + */ + template < CDS_DECL_OPTIONS2 > + struct base_hook: public hook< opt::base_hook_tag, CDS_OPTIONS2 > + {}; + + /// Member hook + /** + \p MemberOffset defines offset in bytes of \ref node member into your structure. + Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - tag + */ + template < size_t MemberOffset, CDS_DECL_OPTIONS2 > + struct member_hook: public hook< opt::member_hook_tag, CDS_OPTIONS2 > + { + //@cond + static const size_t c_nMemberOffset = MemberOffset; + //@endcond + }; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - opt::gc - garbage collector used. 
+ - opt::tag - tag + */ + template + struct traits_hook: public hook< opt::traits_hook_tag, CDS_OPTIONS2 > + { + //@cond + typedef NodeTraits node_traits; + //@endcond + }; + + /// Check link + template + struct link_checker { + //@cond + typedef Node node_type; + //@endcond + + /// Checks if the link field of node \p pNode is NULL + /** + An asserting is generated if \p pNode link field is not NULL + */ + static void is_empty( const node_type * pNode ) + { + assert( pNode->m_pNext.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr() ); + } + }; + + //@cond + template + struct link_checker_selector; + + template + struct link_checker_selector< gc::HRC, Node, opt::never_check_link > + { + typedef link_checker type; + }; + + template + struct link_checker_selector< gc::HRC, Node, opt::debug_check_link > + { + typedef link_checker type; + }; + + template + struct link_checker_selector< GC, Node, opt::never_check_link > + { + typedef intrusive::opt::v::empty_link_checker type; + }; + + template + struct link_checker_selector< GC, Node, opt::debug_check_link > + { +# ifdef _DEBUG + typedef link_checker type; +# else + typedef intrusive::opt::v::empty_link_checker type; +# endif + }; + + template + struct link_checker_selector< GC, Node, opt::always_check_link > + { + typedef link_checker type; + }; + //@endcond + + /// Metafunction for selecting appropriate link checking policy + template < typename Node, opt::link_check_type LinkType > + struct get_link_checker + { + //@cond + typedef typename link_checker_selector< typename Node::gc, Node, LinkType>::type type; + //@endcond + }; + + } // namespace single_link + +}} // namespace cds::intrusive + + + +#endif // #ifndef __CDS_INTRUSIVE_SINGLE_LINK_STRUCT_H diff --git a/cds/intrusive/skip_list_base.h b/cds/intrusive/skip_list_base.h new file mode 100644 index 00000000..b715e619 --- /dev/null +++ b/cds/intrusive/skip_list_base.h @@ -0,0 +1,654 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_SKIP_LIST_BASE_H +#define __CDS_INTRUSIVE_SKIP_LIST_BASE_H + +#include +#include +#include +#include +#include + + +namespace cds { namespace intrusive { + /// SkipListSet related definitions + /** @ingroup cds_intrusive_helper + */ + namespace skip_list { + + /// The maximum possible height of any skip-list + static unsigned int const c_nHeightLimit = 32; + + /// Skip list node + /** + Template parameters: + - GC - garbage collector + - Tag - a tag used to distinguish between different implementation. An incomplete type may be used as a tag. + */ + template + class node { + public: + typedef GC gc ; ///< Garbage collector + typedef Tag tag ; ///< tag + + typedef cds::details::marked_ptr marked_ptr ; ///< marked pointer + typedef typename gc::template atomic_marked_ptr< marked_ptr> atomic_marked_ptr ; ///< atomic marked pointer specific for GC + //@cond + typedef atomic_marked_ptr tower_item_type; + //@endcond + + protected: + atomic_marked_ptr m_pNext ; ///< Next item in bottom-list (list at level 0) + unsigned int m_nHeight ; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1. + atomic_marked_ptr * m_arrNext ; ///< Array of next items for levels 1 .. m_nHeight - 1. 
For node at level 0 \p m_arrNext is \p NULL + + public: + /// Constructs a node of height 1 (a bottom-list node) + node() + : m_pNext( null_ptr()) + , m_nHeight(1) + , m_arrNext( null_ptr()) + {} + + /// Constructs a node of height \p nHeight + void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower ) + { + assert( nHeight > 0 ); + assert( ( nHeight == 1 && nextTower == null_ptr() ) // bottom-list node + || ( nHeight > 1 && nextTower != null_ptr() ) // node at level of more than 0 + ); + + m_arrNext = nextTower; + m_nHeight = nHeight; + } + + //@cond + atomic_marked_ptr * release_tower() + { + atomic_marked_ptr * pTower = m_arrNext; + m_arrNext = null_ptr(); + m_nHeight = 1; + return pTower; + } + + atomic_marked_ptr * get_tower() const + { + return m_arrNext; + } + //@endcond + + /// Access to element of next pointer array + atomic_marked_ptr& next( unsigned int nLevel ) + { + assert( nLevel < height() ); + assert( nLevel == 0 || (nLevel > 0 && m_arrNext != null_ptr() )); + + return nLevel ? m_arrNext[ nLevel - 1] : m_pNext; + } + + /// Access to element of next pointer array (const version) + atomic_marked_ptr const& next( unsigned int nLevel ) const + { + assert( nLevel < height() ); + assert( nLevel == 0 || nLevel > 0 && m_arrNext != null_ptr() ); + + return nLevel ? m_arrNext[ nLevel - 1] : m_pNext; + } + + /// Access to element of next pointer array (same as \ref next function) + atomic_marked_ptr& operator[]( unsigned int nLevel ) + { + return next( nLevel ); + } + + /// Access to element of next pointer array (same as \ref next function) + atomic_marked_ptr const& operator[]( unsigned int nLevel ) const + { + return next( nLevel ); + } + + /// Height of the node + unsigned int height() const + { + return m_nHeight; + } + + /// Clears internal links + void clear() + { + assert( m_arrNext == null_ptr()); + m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_release ); + } + + //@cond + bool is_cleared() const + { + return m_pNext == atomic_marked_ptr() + && m_arrNext == null_ptr() + && m_nHeight <= 1 +; + } + //@endcond + }; + + //@cond + struct undefined_gc; + struct default_hook { + typedef undefined_gc gc; + typedef opt::none tag; + }; + //@endcond + + //@cond + template < typename HookType, CDS_DECL_OPTIONS2> + struct hook + { + typedef typename opt::make_options< default_hook, CDS_OPTIONS2>::type options; + typedef typename options::gc gc; + typedef typename options::tag tag; + typedef node node_type; + typedef HookType hook_type; + }; + //@endcond + + /// Base hook + /** + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - a tag + */ + template < CDS_DECL_OPTIONS2 > + struct base_hook: public hook< opt::base_hook_tag, CDS_OPTIONS2 > + {}; + + /// Member hook + /** + \p MemberOffset defines offset in bytes of \ref node member into your structure. + Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - a tag + */ + template < size_t MemberOffset, CDS_DECL_OPTIONS2 > + struct member_hook: public hook< opt::member_hook_tag, CDS_OPTIONS2 > + { + //@cond + static const size_t c_nMemberOffset = MemberOffset; + //@endcond + }; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - opt::gc - garbage collector used. 
+ - opt::tag - a tag + */ + template + struct traits_hook: public hook< opt::traits_hook_tag, CDS_OPTIONS2 > + { + //@cond + typedef NodeTraits node_traits; + //@endcond + }; + + /// Option specifying random level generator + /** + The random level generator is an important part of skip-list algorithm. + The node height in the skip-list have a probabilistic distribution + where half of the nodes that have level \p i pointers also have level i+1 pointers + (i = 0..30). + The random level generator should provide such distribution. + + The \p Type functor interface is: + \code + struct random_generator { + static unsigned int const c_nUpperBound = 32; + random_generator(); + unsigned int operator()(); + }; + \endcode + + where + - \p c_nUpperBound - constant that specifies the upper bound of random number generated. + The generator produces a number from range [0 .. c_nUpperBound) (upper bound excluded). + \p c_nUpperBound must be no more than 32. + - random_generator() - the constructor of generator object initialises the generator instance (its internal state). + - unsigned int operator()() - the main generating function. Returns random level from range 0..31. + + Stateful generators are supported. + + Available \p Type implementations: + - \ref xorshift + - \ref turbo_pascal + */ + template + struct random_level_generator { + //@cond + template + struct pack: public Base + { + typedef Type random_level_generator; + }; + //@endcond + }; + + /// Xor-shift random level generator + /** + The simplest of the generators described in George + Marsaglia's "Xorshift RNGs" paper. This is not a high-quality + generator but is acceptable for skip-list. + + The random generator should return numbers from range [0..31]. + + From Doug Lea's ConcurrentSkipListMap.java. + */ + class xorshift { + //@cond + CDS_ATOMIC::atomic m_nSeed; + //@endcond + public: + /// The upper bound of generator's return value. The generator produces random number in range [0..c_nUpperBound) + static unsigned int const c_nUpperBound = c_nHeightLimit; + + /// Initializes the generator instance + xorshift() + { + m_nSeed.store( (unsigned int) cds::OS::Timer::random_seed(), CDS_ATOMIC::memory_order_relaxed ); + } + + /// Main generator function + unsigned int operator()() + { + /* ConcurrentSkipListMap.java + private int randomLevel() { + int x = randomSeed; + x ^= x << 13; + x ^= x >>> 17; + randomSeed = x ^= x << 5; + if ((x & 0x80000001) != 0) // test highest and lowest bits + return 0; + int level = 1; + while (((x >>>= 1) & 1) != 0) ++level; + return level; + } + */ + unsigned int x = m_nSeed.load( CDS_ATOMIC::memory_order_relaxed ); + x ^= x << 13; + x ^= x >> 17; + x ^= x << 5; + m_nSeed.store( x, CDS_ATOMIC::memory_order_relaxed ); + unsigned int nLevel = ((x & 0x00000001) != 0) ? 0 : cds::bitop::LSB( (~(x >> 1)) & 0x7FFFFFFF ); + assert( nLevel < c_nUpperBound ); + return nLevel; + } + }; + + /// Turbo-pascal random level generator + /** + This uses a cheap pseudo-random function that was used in Turbo Pascal. + + The random generator should return numbers from range [0..31]. + + From Doug Lea's ConcurrentSkipListMap.java. + */ + class turbo_pascal + { + //@cond + CDS_ATOMIC::atomic m_nSeed; + //@endcond + public: + /// The upper bound of generator's return value. 
The generator produces random number in range [0..c_nUpperBound) + static unsigned int const c_nUpperBound = c_nHeightLimit; + + /// Initializes the generator instance + turbo_pascal() + { + m_nSeed.store( (unsigned int) cds::OS::Timer::random_seed(), CDS_ATOMIC::memory_order_relaxed ); + } + + /// Main generator function + unsigned int operator()() + { + /* + private int randomLevel() { + int level = 0; + int r = randomSeed; + randomSeed = r * 134775813 + 1; + if (r < 0) { + while ((r <<= 1) > 0) + ++level; + } + return level; + } + */ + /* + The low bits are apparently not very random (the original used only + upper 16 bits) so we traverse from highest bit down (i.e., test + sign), thus hardly ever use lower bits. + */ + unsigned int x = m_nSeed.load( CDS_ATOMIC::memory_order_relaxed ) * 134775813 + 1; + m_nSeed.store( x, CDS_ATOMIC::memory_order_relaxed ); + unsigned int nLevel = ( x & 0x80000000 ) ? (31 - cds::bitop::MSBnz( (x & 0x7FFFFFFF) | 1 )) : 0; + assert( nLevel < c_nUpperBound ); + return nLevel; + } + }; + + /// SkipListSet internal statistics + template + struct stat { + typedef EventCounter event_counter ; ///< Event counter type + + event_counter m_nNodeHeightAdd[c_nHeightLimit] ; ///< Count of added node of each height + event_counter m_nNodeHeightDel[c_nHeightLimit] ; ///< Count of deleted node of each height + event_counter m_nInsertSuccess ; ///< Count of success insertion + event_counter m_nInsertFailed ; ///< Count of failed insertion + event_counter m_nInsertRetries ; ///< Count of unsuccessful retries of insertion + event_counter m_nEnsureExist ; ///< Count of \p ensure call for existed node + event_counter m_nEnsureNew ; ///< Count of \p ensure call for new node + event_counter m_nUnlinkSuccess ; ///< Count of successful call of \p unlink + event_counter m_nUnlinkFailed ; ///< Count of failed call of \p unlink + event_counter m_nEraseSuccess ; ///< Count of successful call of \p erase + event_counter m_nEraseFailed ; ///< Count of failed call of \p erase + event_counter m_nFindFastSuccess ; ///< Count of successful call of \p find and all derivatives (via fast-path) + event_counter m_nFindFastFailed ; ///< Count of failed call of \p find and all derivatives (via fast-path) + event_counter m_nFindSlowSuccess ; ///< Count of successful call of \p find and all derivatives (via slow-path) + event_counter m_nFindSlowFailed ; ///< Count of failed call of \p find and all derivatives (via slow-path) + event_counter m_nRenewInsertPosition ; ///< Count of renewing position events while inserting + event_counter m_nLogicDeleteWhileInsert ; ///< Count of events "The node has been logically deleted while inserting" + event_counter m_nNotFoundWhileInsert ; ///< Count of events "Inserting node is not found" + event_counter m_nFastErase ; ///< Fast erase event counter + event_counter m_nFastExtract ; ///< Fast extract event counter + event_counter m_nSlowErase ; ///< Slow erase event counter + event_counter m_nSlowExtract ; ///< Slow extract event counter + event_counter m_nExtractSuccess ; ///< Count of successful call of \p extract + event_counter m_nExtractFailed ; ///< Count of failed call of \p extract + event_counter m_nExtractRetries ; ///< Count of retries of \p extract call + event_counter m_nExtractMinSuccess ; ///< Count of successful call of \p extract_min + event_counter m_nExtractMinFailed ; ///< Count of failed call of \p extract_min + event_counter m_nExtractMinRetries ; ///< Count of retries of \p extract_min call + event_counter m_nExtractMaxSuccess ; ///< Count 
of successful call of \p extract_max + event_counter m_nExtractMaxFailed ; ///< Count of failed call of \p extract_max + event_counter m_nExtractMaxRetries ; ///< Count of retries of \p extract_max call + event_counter m_nEraseWhileFind ; ///< Count of erased item while searching + event_counter m_nExtractWhileFind ; ///< Count of extracted item while searching (RCU only) + + //@cond + void onAddNode( unsigned int nHeight ) + { + assert( nHeight > 0 && nHeight <= sizeof(m_nNodeHeightAdd) / sizeof(m_nNodeHeightAdd[0])); + ++m_nNodeHeightAdd[nHeight - 1]; + } + void onRemoveNode( unsigned int nHeight ) + { + assert( nHeight > 0 && nHeight <= sizeof(m_nNodeHeightDel) / sizeof(m_nNodeHeightDel[0])); + ++m_nNodeHeightDel[nHeight - 1]; + } + + void onInsertSuccess() { ++m_nInsertSuccess ; } + void onInsertFailed() { ++m_nInsertFailed ; } + void onInsertRetry() { ++m_nInsertRetries ; } + void onEnsureExist() { ++m_nEnsureExist ; } + void onEnsureNew() { ++m_nEnsureNew ; } + void onUnlinkSuccess() { ++m_nUnlinkSuccess ; } + void onUnlinkFailed() { ++m_nUnlinkFailed ; } + void onEraseSuccess() { ++m_nEraseSuccess ; } + void onEraseFailed() { ++m_nEraseFailed ; } + void onFindFastSuccess() { ++m_nFindFastSuccess ; } + void onFindFastFailed() { ++m_nFindFastFailed ; } + void onFindSlowSuccess() { ++m_nFindSlowSuccess ; } + void onFindSlowFailed() { ++m_nFindSlowFailed ; } + void onEraseWhileFind() { ++m_nEraseWhileFind ; } + void onExtractWhileFind() { ++m_nExtractWhileFind ; } + void onRenewInsertPosition() { ++m_nRenewInsertPosition; } + void onLogicDeleteWhileInsert() { ++m_nLogicDeleteWhileInsert; } + void onNotFoundWhileInsert() { ++m_nNotFoundWhileInsert; } + void onFastErase() { ++m_nFastErase; } + void onFastExtract() { ++m_nFastExtract; } + void onSlowErase() { ++m_nSlowErase; } + void onSlowExtract() { ++m_nSlowExtract; } + void onExtractSuccess() { ++m_nExtractSuccess; } + void onExtractFailed() { ++m_nExtractFailed; } + void onExtractRetry() { ++m_nExtractRetries; } + void onExtractMinSuccess() { ++m_nExtractMinSuccess; } + void onExtractMinFailed() { ++m_nExtractMinFailed; } + void onExtractMinRetry() { ++m_nExtractMinRetries; } + void onExtractMaxSuccess() { ++m_nExtractMaxSuccess; } + void onExtractMaxFailed() { ++m_nExtractMaxFailed; } + void onExtractMaxRetry() { ++m_nExtractMaxRetries; } + + //@endcond + }; + + /// SkipListSet empty internal statistics + struct empty_stat { + //@cond + void onAddNode( unsigned int nHeight ) const {} + void onRemoveNode( unsigned int nHeight ) const {} + void onInsertSuccess() const {} + void onInsertFailed() const {} + void onInsertRetry() const {} + void onEnsureExist() const {} + void onEnsureNew() const {} + void onUnlinkSuccess() const {} + void onUnlinkFailed() const {} + void onEraseSuccess() const {} + void onEraseFailed() const {} + void onFindFastSuccess() const {} + void onFindFastFailed() const {} + void onFindSlowSuccess() const {} + void onFindSlowFailed() const {} + void onEraseWhileFind() const {} + void onExtractWhileFind() const {} + void onRenewInsertPosition() const {} + void onLogicDeleteWhileInsert() const {} + void onNotFoundWhileInsert() const {} + void onFastErase() const {} + void onFastExtract() const {} + void onSlowErase() const {} + void onSlowExtract() const {} + void onExtractSuccess() const {} + void onExtractFailed() const {} + void onExtractRetry() const {} + void onExtractMinSuccess() const {} + void onExtractMinFailed() const {} + void onExtractMinRetry() const {} + void onExtractMaxSuccess() const {} + void 
onExtractMaxFailed() const {} + void onExtractMaxRetry() const {} + + //@endcond + }; + + //@cond + // For internal use only!!! + template + struct internal_node_builder { + template + struct pack: public Base + { + typedef Type internal_node_builder; + }; + }; + //@endcond + + /// Type traits for SkipListSet class + struct type_traits + { + /// Hook used + /** + Possible values are: skip_list::base_hook, skip_list::member_hook, skip_list::traits_hook. + */ + typedef base_hook<> hook; + + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// specifies binary predicate used for key compare. + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// Disposer + /** + The functor used for dispose removed items. Default is opt::v::empty_disposer. + */ + typedef opt::v::empty_disposer disposer; + + /// Item counter + /** + The type for item counting feature. + Default is no item counter (\ref atomicity::empty_item_counter) + */ + typedef atomicity::empty_item_counter item_counter; + + /// C++ memory ordering model + /** + List of available memory ordering see opt::memory_model + */ + typedef opt::v::relaxed_ordering memory_model; + + /// Random level generator + /** + The random level generator is an important part of skip-list algorithm. + The node height in the skip-list have a probabilistic distribution + where half of the nodes that have level \p i pointers also have level i+1 pointers + (i = 0..30). So, the height of a node is in range [0..31]. + + See skip_list::random_level_generator option setter. + */ + typedef turbo_pascal random_level_generator; + + /// Allocator + /** + Although the skip-list is an intrusive container, + an allocator should be provided to maintain variable randomly-calculated height of the node + since the node can contain up to 32 next pointers. + The allocator specified is used to allocate an array of next pointers + for nodes which height is more than 1. + */ + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// back-off strategy used + /** + If the option is not specified, the cds::backoff::Default is used. + */ + typedef cds::backoff::Default back_off; + + /// Internal statistics + typedef empty_stat stat; + + /// RCU deadlock checking policy (only for \ref cds_intrusive_SkipListSet_rcu "RCU-based SkipListSet") + /** + List of available options see opt::rcu_check_deadlock + */ + typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; + + //@cond + // For internal use only!!! + typedef opt::none internal_node_builder; + //@endcond + }; + + /// Metafunction converting option list to SkipListSet traits + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + \p Options list see \ref SkipListSet. 
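+
+        As a rough illustration only (the functor \p my_compare is a user-supplied comparator and \p my_traits is an assumed name, neither is part of the library; see \ref SkipListSet for a complete example):
+        \code
+        typedef cds::intrusive::skip_list::make_traits<
+            cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< cds::gc::HP > > >
+            ,cds::intrusive::opt::compare< my_compare >
+        >::type my_traits;
+        \endcode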
+ */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS13 >::type + ,CDS_OPTIONS13 + >::type type; +# endif + }; + + //@cond + namespace details { + template + class head_node: public Node + { + typedef Node node_type; + + typename node_type::atomic_marked_ptr m_Tower[skip_list::c_nHeightLimit]; + + public: + head_node( unsigned int nHeight ) + { + for ( size_t i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i ) + m_Tower[i].store( typename node_type::marked_ptr(), CDS_ATOMIC::memory_order_relaxed ); + + node_type::make_tower( nHeight, m_Tower ); + } + + node_type * head() const + { + return const_cast( static_cast(this)); + } + }; + + template + struct intrusive_node_builder + { + typedef NodeType node_type; + typedef AtomicNodePtr atomic_node_ptr; + typedef Alloc allocator_type; + + typedef cds::details::Allocator< atomic_node_ptr, allocator_type > tower_allocator; + + template + static node_type * make_tower( node_type * pNode, RandomGen& gen ) + { + return make_tower( pNode, gen() + 1 ); + } + + static node_type * make_tower( node_type * pNode, unsigned int nHeight ) + { + if ( nHeight > 1 ) + pNode->make_tower( nHeight, tower_allocator().NewArray( nHeight - 1, null_ptr() )); + return pNode; + } + + static void dispose_tower( node_type * pNode ) + { + unsigned int nHeight = pNode->height(); + if ( nHeight > 1 ) + tower_allocator().Delete( pNode->release_tower(), nHeight ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + dispose_tower( pNode ); + } + }; + }; + + // Forward declaration + template + class iterator; + + } // namespace details + //@endcond + + } // namespace skip_list + + // Forward declaration + template + class SkipListSet; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_SKIP_LIST_BASE_H diff --git a/cds/intrusive/skip_list_hp.h b/cds/intrusive/skip_list_hp.h new file mode 100644 index 00000000..2df5fd33 --- /dev/null +++ b/cds/intrusive/skip_list_hp.h @@ -0,0 +1,9 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_SKIP_LIST_HP_H +#define __CDS_INTRUSIVE_SKIP_LIST_HP_H + +#include +#include + +#endif diff --git a/cds/intrusive/skip_list_hrc.h b/cds/intrusive/skip_list_hrc.h new file mode 100644 index 00000000..afd4df43 --- /dev/null +++ b/cds/intrusive/skip_list_hrc.h @@ -0,0 +1,201 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_SKIP_LIST_HRC_H +#define __CDS_INTRUSIVE_SKIP_LIST_HRC_H + +#include +#include + +//@cond +namespace cds { namespace intrusive { namespace skip_list { + // Specialization for HRC GC + template + class node< cds::gc::HRC, Tag>: public cds::gc::HRC::container_node + { + public: + typedef gc::HRC gc ; ///< Garbage collector + typedef Tag tag ; ///< tag + + typedef cds::details::marked_ptr marked_ptr ; ///< marked pointer + typedef typename gc::template atomic_marked_ptr< marked_ptr> atomic_marked_ptr ; ///< atomic marked pointer specific for GC + typedef atomic_marked_ptr tower_item_type; + + protected: + atomic_marked_ptr m_pNext ; ///< Next item in bottom-list (list at level 0) + unsigned int m_nHeight ; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1. + atomic_marked_ptr * m_arrNext ; ///< Array of next items for levels 1 .. m_nHeight - 1. 
For node at level 0 \p m_arrNext is \p NULL + + public: + bool m_bDel; + + public: + /// Constructs a node of height 1 (a bottom-list node) + node() + : m_pNext( null_ptr()) + , m_nHeight(1) + , m_arrNext( null_ptr()) + , m_bDel( false ) + {} + + ~node() + { + release_tower(); + m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed ); + } + + /// Constructs a node of height \p nHeight + void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower ) + { + assert( nHeight > 0 ); + assert( ( nHeight == 1 && nextTower == null_ptr() ) // bottom-list node + || ( nHeight > 1 && nextTower != null_ptr() ) // node at level of more than 0 + ); + + m_arrNext = nextTower; + m_nHeight = nHeight; + } + + atomic_marked_ptr * release_tower() + { + unsigned int nHeight = m_nHeight - 1; + atomic_marked_ptr * pTower = m_arrNext; + if ( pTower ) { + m_arrNext = null_ptr(); + m_nHeight = 1; + for ( unsigned int i = 0; i < nHeight; ++i ) + pTower[i].store( marked_ptr(), CDS_ATOMIC::memory_order_release ); + } + return pTower; + } + + atomic_marked_ptr * get_tower() const + { + return m_arrNext; + } + + /// Access to element of next pointer array + atomic_marked_ptr& next( unsigned int nLevel ) + { + assert( nLevel < height() ); + assert( nLevel == 0 || (nLevel > 0 && m_arrNext != null_ptr() )); + + return nLevel ? m_arrNext[ nLevel - 1] : m_pNext; + } + + /// Access to element of next pointer array (const version) + atomic_marked_ptr const& next( unsigned int nLevel ) const + { + assert( nLevel < height() ); + assert( nLevel == 0 || (nLevel > 0 && m_arrNext != null_ptr()) ); + + return nLevel ? m_arrNext[ nLevel - 1] : m_pNext; + } + + /// Access to element of next pointer array (same as \ref next function) + atomic_marked_ptr& operator[]( unsigned int nLevel ) + { + return next( nLevel ); + } + + /// Access to element of next pointer array (same as \ref next function) + atomic_marked_ptr const& operator[]( unsigned int nLevel ) const + { + return next( nLevel ); + } + + /// Height of the node + unsigned int height() const + { + return m_nHeight; + } + + protected: + virtual void cleanUp( cds::gc::hrc::ThreadGC * pGC ) + { + assert( pGC != NULL ); + typename gc::GuardArray<2> aGuards( *pGC ); + + unsigned int const nHeight = height(); + for (unsigned int i = 0; i < nHeight; ++i ) { + while ( true ) { + marked_ptr pNextMarked( aGuards.protect( 0, next(i) )); + node * pNext = pNextMarked.ptr(); + if ( pNext && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) { + marked_ptr p = aGuards.protect( 1, pNext->next(i) ); + next(i).compare_exchange_strong( pNextMarked, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ); + continue; + } + else { + break; + } + } + } + } + + virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent ) + { + unsigned int const nHeight = height(); + if ( bConcurrent ) { + for (unsigned int i = 0; i < nHeight; ++i ) { + marked_ptr pNext = next(i).load(CDS_ATOMIC::memory_order_relaxed); + while ( !next(i).compare_exchange_weak( pNext, marked_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ); + } + } + else { + for (unsigned int i = 0; i < nHeight; ++i ) + next(i).store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed ); + } + } + }; + + namespace details { + + template + class head_node< node< cds::gc::HRC, Tag > > + { + typedef node< cds::gc::HRC, Tag > node_type; + + struct head_tower: public node_type + { + typename node_type::atomic_marked_ptr m_Tower[skip_list::c_nHeightLimit]; + }; + + head_tower * 
m_pHead; + + struct head_disposer { + void operator()( head_tower * p ) + { + delete p; + } + }; + public: + head_node( unsigned int nHeight ) + : m_pHead( new head_tower() ) + { + for ( size_t i = 0; i < sizeof(m_pHead->m_Tower) / sizeof(m_pHead->m_Tower[0]); ++i ) + m_pHead->m_Tower[i].store( typename node_type::marked_ptr(), CDS_ATOMIC::memory_order_relaxed ); + + m_pHead->make_tower( nHeight, m_pHead->m_Tower ); + } + + ~head_node() + { + cds::gc::HRC::template retire( m_pHead ); + } + + node_type * head() + { + return static_cast( m_pHead ); + } + node_type const * head() const + { + return static_cast( m_pHead ); + } + + }; + } // namespace details + +}}} // namespace cds::intrusive::skip_list +//@endcond + +#endif diff --git a/cds/intrusive/skip_list_impl.h b/cds/intrusive/skip_list_impl.h new file mode 100644 index 00000000..bda9e0fa --- /dev/null +++ b/cds/intrusive/skip_list_impl.h @@ -0,0 +1,1749 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_SKIP_LIST_IMPL_H +#define __CDS_INTRUSIVE_SKIP_LIST_IMPL_H + +#include +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + //@cond + namespace skip_list { namespace details { + + template + class iterator { + public: + typedef GC gc; + typedef NodeTraits node_traits; + typedef BackOff back_off; + typedef typename node_traits::node_type node_type; + typedef typename node_traits::value_type value_type; + static bool const c_isConst = IsConst; + + typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref; + + protected: + typedef typename node_type::marked_ptr marked_ptr; + typedef typename node_type::atomic_marked_ptr atomic_marked_ptr; + + typename gc::Guard m_guard; + node_type * m_pNode; + + protected: + static value_type * gc_protect( marked_ptr p ) + { + return node_traits::to_value_ptr( p.ptr() ); + } + + void next() + { + typename gc::Guard g; + g.copy( m_guard ); + back_off bkoff; + + for (;;) { + if ( m_pNode->next( m_pNode->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) { + // Current node is marked as deleted. So, its next pointer can point to anything + // In this case we interrupt our iteration and returns end() iterator. + *this = iterator(); + return; + } + + marked_ptr p = m_guard.protect( (*m_pNode)[0], gc_protect ); + node_type * pp = p.ptr(); + if ( p.bits() ) { + // p is marked as deleted. Spin waiting for physical removal + bkoff(); + continue; + } + else if ( pp && pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_relaxed ).bits() ) { + // p is marked as deleted. Spin waiting for physical removal + bkoff(); + continue; + } + + m_pNode = pp; + break; + } + } + + public: // for internal use only!!! 
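+            // The constructor below positions the iterator on the first element of the
+            // bottom-level list: it protects head->next(0) with a guard and, while the
+            // candidate node is marked as logically deleted at its top level, backs off
+            // and retries. An empty skip-list leaves m_pNode NULL, i.e. the end() iterator.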
+ iterator( node_type& refHead ) + : m_pNode( null_ptr() ) + { + back_off bkoff; + + for (;;) { + marked_ptr p = m_guard.protect( refHead[0], gc_protect ); + if ( !p.ptr() ) { + // empty skip-list + m_guard.clear(); + break; + } + + node_type * pp = p.ptr(); + // Logically deleted node is marked from highest level + if ( !pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) { + m_pNode = pp; + break; + } + + bkoff(); + } + } + + public: + iterator() + : m_pNode( null_ptr()) + {} + + iterator( iterator const& s) + : m_pNode( s.m_pNode ) + { + m_guard.assign( node_traits::to_value_ptr(m_pNode) ); + } + + value_type * operator ->() const + { + assert( m_pNode != null_ptr< node_type *>() ); + assert( node_traits::to_value_ptr( m_pNode ) != null_ptr() ); + + return node_traits::to_value_ptr( m_pNode ); + } + + value_ref operator *() const + { + assert( m_pNode != null_ptr< node_type *>() ); + assert( node_traits::to_value_ptr( m_pNode ) != null_ptr() ); + + return *node_traits::to_value_ptr( m_pNode ); + } + + /// Pre-increment + iterator& operator ++() + { + next(); + return *this; + } + + iterator& operator = (const iterator& src) + { + m_pNode = src.m_pNode; + m_guard.copy( src.m_guard ); + return *this; + } + + template + bool operator ==(iterator const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator const& i ) const + { + return !( *this == i ); + } + }; + }} // namespace skip_list::details + //@endcond + + /// Lock-free skip-list set + /** @ingroup cds_intrusive_map + @anchor cds_intrusive_SkipListSet_hp + + The implementation of well-known probabilistic data structure called skip-list + invented by W.Pugh in his papers: + - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees + - [1990] W.Pugh A Skip List Cookbook + + A skip-list is a probabilistic data structure that provides expected logarithmic + time search without the need of rebalance. The skip-list is a collection of sorted + linked list. Nodes are ordered by key. Each node is linked into a subset of the lists. + Each list has a level, ranging from 0 to 32. The bottom-level list contains + all the nodes, and each higher-level list is a sublist of the lower-level lists. + Each node is created with a random top level (with a random height), and belongs + to all lists up to that level. The probability that a node has the height 1 is 1/2. + The probability that a node has the height N is 1/2 ** N (more precisely, + the distribution depends on an random generator provided, but our generators + have this property). + + The lock-free variant of skip-list is implemented according to book + - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming", + chapter 14.4 "A Lock-Free Concurrent Skiplist". + \note The algorithm described in this book cannot be directly adapted for C++ (roughly speaking, + the algo contains a lot of bugs). The \b libcds implementation applies the approach discovered + by M.Michael in his \ref cds_intrusive_MichaelList_hp "lock-free linked list". + + Template arguments: + - \p GC - Garbage collector used. Note the \p GC must be the same as the GC used for item type \p T (see skip_list::node). + - \p T - type to be stored in the list. The type must be based on skip_list::node (for skip_list::base_hook) + or it must have a member of type skip_list::node (for skip_list::member_hook). + - \p Traits - type traits. See skip_list::type_traits for explanation. 
+ + It is possible to declare option-based list with cds::intrusive::skip_list::make_traits metafunction istead of \p Traits template + argument. + Template argument list \p Options of cds::intrusive::skip_list::make_traits metafunction are: + - opt::hook - hook used. Possible values are: skip_list::base_hook, skip_list::member_hook, skip_list::traits_hook. + If the option is not specified, skip_list::base_hook<> and gc::HP is used. + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::disposer - the functor used for dispose removed items. Default is opt::v::empty_disposer. Due the nature + of GC schema the disposer may be called asynchronously. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - skip_list::random_level_generator - random level generator. Can be skip_list::xorshift, skip_list::turbo_pascal or + user-provided one. See skip_list::random_level_generator option description for explanation. + Default is \p %skip_list::turbo_pascal. + - opt::allocator - although the skip-list is an intrusive container, + an allocator should be provided to maintain variable randomly-calculated height of the node + since the node can contain up to 32 next pointers. The allocator option is used to allocate an array of next pointers + for nodes which height is more than 1. Default is \ref CDS_DEFAULT_ALLOCATOR. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::Default is used. + - opt::stat - internal statistics. Available types: skip_list::stat, skip_list::empty_stat (the default) + + \warning The skip-list requires up to 67 hazard pointers that may be critical for some GCs for which + the guard count is limited (like as gc::HP, gc::HRC). Those GCs should be explicitly initialized with + hazard pointer enough: \code cds::gc::HP myhp( 67 ) \endcode. Otherwise an run-time exception may be raised + when you try to create skip-list object. + + \note There are several specializations of \p %SkipListSet for each \p GC. You should include: + - for gc::HP garbage collector + - for gc::HRC garbage collector + - for gc::PTB garbage collector + - for \ref cds_intrusive_SkipListSet_nogc for persistent set + - for \ref cds_intrusive_SkipListSet_rcu "RCU type" + + Iterators + + The class supports a forward iterator (\ref iterator and \ref const_iterator). + The iteration is ordered. + The iterator object is thread-safe: the element pointed by the iterator object is guarded, + so, the element cannot be reclaimed while the iterator object is alive. + However, passing an iterator object between threads is dangerous. + + \warning Due to concurrent nature of skip-list set it is not guarantee that you can iterate + all elements in the set: any concurrent deletion can exclude the element + pointed by the iterator from the set, and your iteration can be terminated + before end of the set. 
Therefore, such iteration is more suitable for debugging purpose only + + Remember, each iterator object requires 2 additional hazard pointers, that may be + a limited resource for \p GC like as gc::HP and gc::HRC (for gc::PTB the count of + guards is unlimited). + + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced. + + How to use + + You should incorporate skip_list::node into your struct \p T and provide + appropriate skip_list::type_traits::hook in your \p Traits template parameters. Usually, for \p Traits you + define a struct based on skip_list::type_traits. + + Example for gc::HP and base hook: + \code + // Include GC-related skip-list specialization + #include + + // Data stored in skip list + struct my_data: public cds::intrusive::skip_list::node< cds::gc::HP > + { + // key field + std::string strKey; + + // other data + // ... + }; + + // my_data compare functor + struct my_data_cmp { + int operator()( const my_data& d1, const my_data& d2 ) + { + return d1.strKey.compare( d2.strKey ); + } + + int operator()( const my_data& d, const std::string& s ) + { + return d.strKey.compare(s); + } + + int operator()( const std::string& s, const my_data& d ) + { + return s.compare( d.strKey ); + } + }; + + + // Declare type_traits + struct my_traits: public cds::intrusive::skip_list::type_traits + { + typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< cds::gc::HP > > hook; + typedef my_data_cmp compare; + }; + + // Declare skip-list set type + typedef cds::intrusive::SkipListSet< cds::gc::HP, my_data, my_traits > traits_based_set; + \endcode + + Equivalent option-based code: + \code + // GC-related specialization + #include + + struct my_data { + // see above + }; + struct compare { + // see above + }; + + // Declare option-based skip-list set + typedef cds::intrusive::SkipListSet< cds::gc::HP + ,my_data + , typename cds::intrusive::skip_list::make_traits< + cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< cds::gc::HP > > > + ,cds::intrusive::opt::compare< my_data_cmp > + >::type + > option_based_set; + + \endcode + */ + template < + class GC + ,typename T +#ifdef CDS_DOXYGEN_INVOKED + ,typename Traits = skip_list::type_traits +#else + ,typename Traits +#endif + > + class SkipListSet + { + public: + typedef T value_type ; ///< type of value stored in the skip-list + typedef Traits options ; ///< Traits template parameter + + typedef typename options::hook hook ; ///< hook type + typedef typename hook::node_type node_type ; ///< node type + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter. 
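+        // The comparator is built from the opt::compare option when it is specified;
+        // otherwise the opt::less predicate is wrapped into a comparator.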
+# else + typedef typename opt::details::make_comparator< value_type, options >::type key_comparator; +# endif + + typedef typename options::disposer disposer ; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + + typedef GC gc ; ///< Garbage collector + typedef typename options::item_counter item_counter ; ///< Item counting policy used + typedef typename options::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + typedef typename options::random_level_generator random_level_generator ; ///< random level generator + typedef typename options::allocator allocator_type ; ///< allocator for maintaining array of next pointers of the node + typedef typename options::back_off back_off ; ///< Back-off strategy + typedef typename options::stat stat ; ///< internal statistics type + + public: + typedef cds::gc::guarded_ptr< gc, value_type > guarded_ptr; ///< Guarded pointer + + /// Max node height. The actual node height should be in range [0 .. c_nMaxHeight) + /** + The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound + but it should be no more than 32 (\ref skip_list::c_nHeightLimit). + */ + static unsigned int const c_nMaxHeight = std::conditional< + (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit), + std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >, + std::integral_constant< unsigned int, skip_list::c_nHeightLimit > + >::type::value; + + //@cond + static unsigned int const c_nMinHeight = 5; + //@endcond + + protected: + typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic marked node pointer + typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer + + protected: + //@cond + typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder; + + typedef typename std::conditional< + std::is_same< typename options::internal_node_builder, cds::opt::none >::value + ,intrusive_node_builder + ,typename options::internal_node_builder + >::type node_builder; + + typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr; + + // c_nMaxHeight * 2 - pPred/pSucc guards + // + 1 - for erase, unlink + // + 1 - for clear + static size_t const c_nHazardPtrCount = c_nMaxHeight * 2 + 2; + struct position { + node_type * pPrev[ c_nMaxHeight ]; + node_type * pSucc[ c_nMaxHeight ]; + + typename gc::template GuardArray< c_nMaxHeight * 2 > guards ; ///< Guards array for pPrev/pSucc + + node_type * pCur ; // guarded by guards; needed only for *ensure* function + }; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_insert_functor { + void operator()( value_type& ) + {} + }; + + struct empty_erase_functor { + void operator()( value_type const& ) + {} + }; + + struct empty_find_functor { + template + void operator()( value_type& item, Q& val ) + {} + }; + + struct get_functor { + typename gc::Guard& m_guard; + + get_functor( typename gc::Guard& gp ) + : m_guard(gp) + {} + + template + void operator()( value_type& item, Q& val ) + { + m_guard.assign( &item ); + } + }; + + template + struct insert_at_ensure_functor { + Func m_func; + insert_at_ensure_functor( Func f ) : m_func(f) {} + + void operator()( value_type& item ) + { + cds::unref( m_func)( true, item, item ); + } + }; + + struct copy_value_functor { + template + void operator()( Q& dest, value_type const& src ) 
const + { + dest = src; + } + }; + + struct dummy_copy_functor { + template + void operator()( Q&, value_type const&) const {} + }; +# endif // ifndef CDS_CXX11_LAMBDA_SUPPORT + + //@endcond + + protected: + skip_list::details::head_node< node_type > m_Head ; ///< head tower (max height) + + item_counter m_ItemCounter ; ///< item counter + random_level_generator m_RandomLevelGen ; ///< random level generator instance + CDS_ATOMIC::atomic m_nHeight ; ///< estimated high level + mutable stat m_Stat ; ///< internal statistics + + protected: + //@cond + unsigned int random_level() + { + // Random generator produces a number from range [0..31] + // We need a number from range [1..32] + return m_RandomLevelGen() + 1; + } + + template + node_type * build_node( Q v ) + { + return node_builder::make_tower( v, m_RandomLevelGen ); + } + + static value_type * gc_protect( marked_node_ptr p ) + { + return node_traits::to_value_ptr( p.ptr() ); + } + + static void dispose_node( value_type * pVal ) + { + assert( pVal != null_ptr() ); + typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) ); + disposer()( pVal ); + } + + template + bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound ) + { + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; + + // Hazard pointer array: + // pPred: [nLevel * 2] + // pSucc: [nLevel * 2 + 1] + + retry: + pPred = m_Head.head(); + int nCmp = 1; + + for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { + pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred )); + while ( true ) { + pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect ); + if ( pCur.bits() ) { + // pCur.bits() means that pPred is logically deleted + goto retry; + } + + if ( pCur.ptr() == null_ptr()) { + // end of the list at level nLevel - goto next level + break; + } + + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed ); + + if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() ) + goto retry; + + if ( pSucc.bits() ) { + // pCur is marked, i.e. logically deleted. 
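+                        // Help the concurrent eraser: physically unlink the marked node at this
+                        // level by swinging pPred->next(nLevel) from pCur to pCur's successor.
+                        // When level 0 is unlinked, the node is retired to the GC; in either case
+                        // the search restarts from the head.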
+ marked_node_ptr p( pCur.ptr() ); + if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + { + if ( nLevel == 0 ) { + gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node ); + m_Stat.onEraseWhileFind(); + } + } + goto retry; + } + else { + nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); + if ( nCmp < 0 ) { + pPred = pCur.ptr(); + pos.guards.copy( nLevel * 2, nLevel * 2 + 1 ) ; // pPrev guard := cur guard + } + else if ( nCmp == 0 && bStopIfFound ) + goto found; + else + break; + } + } + + // Next level + pos.pPrev[ nLevel ] = pPred; + pos.pSucc[ nLevel ] = pCur.ptr(); + } + + if ( nCmp != 0 ) + return false; + + found: + pos.pCur = pCur.ptr(); + return pCur.ptr() && nCmp == 0; + } + + bool find_min_position( position& pos ) + { + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; + + // Hazard pointer array: + // pPred: [nLevel * 2] + // pSucc: [nLevel * 2 + 1] + + retry: + pPred = m_Head.head(); + + for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { + pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred )); + pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect ); + + // pCur.bits() means that pPred is logically deleted + // head cannot be deleted + assert( pCur.bits() == 0 ); + + if ( pCur.ptr() ) { + + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed ); + + if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() ) + goto retry; + + if ( pSucc.bits() ) { + // pCur is marked, i.e. logically deleted. + marked_node_ptr p( pCur.ptr() ); + if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + { + if ( nLevel == 0 ) + gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node ); + } + goto retry; + } + } + + // Next level + pos.pPrev[ nLevel ] = pPred; + pos.pSucc[ nLevel ] = pCur.ptr(); + } + + return (pos.pCur = pCur.ptr()) != null_ptr(); + } + + bool find_max_position( position& pos ) + { + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; + + // Hazard pointer array: + // pPred: [nLevel * 2] + // pSucc: [nLevel * 2 + 1] + + retry: + pPred = m_Head.head(); + + for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { + pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred )); + while ( true ) { + pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect ); + if ( pCur.bits() ) { + // pCur.bits() means that pPred is logically deleted + goto retry; + } + + if ( pCur.ptr() == null_ptr()) { + // end of the list at level nLevel - goto next level + break; + } + + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed ); + + if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() ) + goto retry; + + if ( pSucc.bits() ) { + // pCur is marked, i.e. logically deleted. 
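+                        // The same helping technique as in find_position(): unlink the marked
+                        // node at this level and restart the search from the head.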
+ marked_node_ptr p( pCur.ptr() ); + if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + { + if ( nLevel == 0 ) + gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node ); + } + goto retry; + } + else { + if ( !pSucc.ptr() ) + break; + + pPred = pCur.ptr(); + pos.guards.copy( nLevel * 2, nLevel * 2 + 1 ); // pPrev guard := cur guard + //pos.guards.copy( nLevel * 2, gCur ) ; // pPrev guard := gCur + } + } + + // Next level + pos.pPrev[ nLevel ] = pPred; + pos.pSucc[ nLevel ] = pCur.ptr(); + } + + return (pos.pCur = pCur.ptr()) != null_ptr(); + } + + template + bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f ) + { + unsigned int nHeight = pNode->height(); + + for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) + pNode->next(nLevel).store( marked_node_ptr(), memory_model::memory_order_relaxed ); + + { + marked_node_ptr p( pos.pSucc[0] ); + pNode->next( 0 ).store( p, memory_model::memory_order_release ); + if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) { + return false; + } + cds::unref( f )( val ); + } + + for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) { + marked_node_ptr p; + while ( true ) { + marked_node_ptr q( pos.pSucc[ nLevel ]); + if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { + // pNode has been marked as removed while we are inserting it + // Stop inserting + assert( p.bits() ); + m_Stat.onLogicDeleteWhileInsert(); + return true; + } + p = q; + if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) + break; + + // Renew insert position + m_Stat.onRenewInsertPosition(); + if ( !find_position( val, pos, key_comparator(), false )) { + // The node has been deleted while we are inserting it + m_Stat.onNotFoundWhileInsert(); + return true; + } + } + } + return true; + } + + template + bool try_remove_at( node_type * pDel, position& pos, Func f ) + { + assert( pDel != null_ptr()); + + marked_node_ptr pSucc; + typename gc::Guard gSucc; + + // logical deletion (marking) + for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) { + while ( true ) { + pSucc = gSucc.protect( pDel->next(nLevel), gc_protect ); + if ( pSucc.bits() || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, + memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + { + break; + } + } + } + + while ( true ) { + pSucc = gSucc.protect( pDel->next(0), gc_protect ); + marked_node_ptr p( pSucc.ptr() ); + if ( pDel->next(0).compare_exchange_strong( p, marked_node_ptr(p.ptr(), 1), + memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + { + cds::unref(f)( *node_traits::to_value_ptr( pDel )); + + // Physical deletion + // try fast erase + p = pDel; + for ( int nLevel = static_cast( pDel->height() - 1 ); nLevel >= 0; --nLevel ) { + pSucc = gSucc.protect( pDel->next(nLevel), gc_protect ); + if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( p, marked_node_ptr(pSucc.ptr()), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed) ) + { + // Make slow erase + find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ); + m_Stat.onSlowErase(); + return true; 
+ } + } + + // Fast erasing success + gc::retire( node_traits::to_value_ptr( pDel ), dispose_node ); + m_Stat.onFastErase(); + return true; + } + else { + if ( p.bits() ) { + return false; + } + } + } + } + + enum finsd_fastpath_result { + find_fastpath_found, + find_fastpath_not_found, + find_fastpath_abort + }; + template + finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) + { + node_type * pPred; + typename gc::template GuardArray<2> guards; + marked_node_ptr pCur; + marked_node_ptr pNull; + + back_off bkoff; + + pPred = m_Head.head(); + for ( int nLevel = static_cast( m_nHeight.load(memory_model::memory_order_relaxed) - 1 ); nLevel >= 0; --nLevel ) { + pCur = guards.protect( 1, pPred->next(nLevel), gc_protect ); + if ( pCur == pNull ) + continue; + + while ( pCur != pNull ) { + if ( pCur.bits() ) { + unsigned int nAttempt = 0; + while ( pCur.bits() && nAttempt++ < 16 ) { + bkoff(); + pCur = guards.protect( 1, pPred->next(nLevel), gc_protect ); + } + bkoff.reset(); + + if ( pCur.bits() ) { + // Maybe, we are on deleted node sequence + // Abort searching, try slow-path + return find_fastpath_abort; + } + } + + if ( pCur.ptr() ) { + int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val ); + if ( nCmp < 0 ) { + guards.copy( 0, 1 ); + pPred = pCur.ptr(); + pCur = guards.protect( 1, pCur->next(nLevel), gc_protect ); + } + else if ( nCmp == 0 ) { + // found + cds::unref(f)( *node_traits::to_value_ptr( pCur.ptr() ), val ); + return find_fastpath_found; + } + else // pCur > val - go down + break; + } + } + } + + return find_fastpath_not_found; + } + + template + bool find_slowpath( Q& val, Compare cmp, Func f ) + { + position pos; + if ( find_position( val, pos, cmp, true )) { + assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 ); + + cds::unref(f)( *node_traits::to_value_ptr( pos.pCur ), val ); + return true; + } + else + return false; + } + + template + bool find_with_( Q& val, Compare cmp, Func f ) + { + switch ( find_fastpath( val, cmp, f )) { + case find_fastpath_found: + m_Stat.onFindFastSuccess(); + return true; + case find_fastpath_not_found: + m_Stat.onFindFastFailed(); + return false; + default: + break; + } + + if ( find_slowpath( val, cmp, f )) { + m_Stat.onFindSlowSuccess(); + return true; + } + + m_Stat.onFindSlowFailed(); + return false; + } + + template + bool get_with_( typename gc::Guard& guard, Q const& val, Compare cmp ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return find_with_( val, cmp, [&guard](value_type& found, Q const& ) { guard.assign(&found); } ); +# else + get_functor gf(guard); + return find_with_( val, cmp, cds::ref(gf) ); +# endif + } + + template + bool erase_( Q const& val, Compare cmp, Func f ) + { + position pos; + + if ( !find_position( val, pos, cmp, false ) ) { + m_Stat.onEraseFailed(); + return false; + } + + node_type * pDel = pos.pCur; + typename gc::Guard gDel; + gDel.assign( node_traits::to_value_ptr(pDel) ); + assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 ); + + unsigned int nHeight = pDel->height(); + if ( try_remove_at( pDel, pos, f )) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onEraseSuccess(); + return true; + } + + m_Stat.onEraseFailed(); + return false; + } + + template + bool extract_( typename gc::Guard& guard, Q const& val, Compare cmp ) + { + position pos; + + for (;;) { + if ( !find_position( val, pos, cmp, false ) ) { + m_Stat.onExtractFailed(); + return false; + } + + node_type * pDel = pos.pCur; + guard.assign( node_traits::to_value_ptr(pDel)); + assert( cmp( 
*node_traits::to_value_ptr( pDel ), val ) == 0 ); + + unsigned int nHeight = pDel->height(); + if ( try_remove_at( pDel, pos, +# ifdef CDS_CXX11_LAMBDA_SUPPORT + [](value_type const&) {} +# else + empty_erase_functor() +# endif + )) + { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onExtractSuccess(); + return true; + } + + m_Stat.onExtractRetry(); + } + } + + bool extract_min_( typename gc::Guard& gDel ) + { + position pos; + + for (;;) { + if ( !find_min_position( pos ) ) { + // The list is empty + m_Stat.onExtractMinFailed(); + return false; + } + + node_type * pDel = pos.pCur; + + unsigned int nHeight = pDel->height(); + gDel.assign( node_traits::to_value_ptr(pDel) ); + + if ( try_remove_at( pDel, pos, +# ifdef CDS_CXX11_LAMBDA_SUPPORT + [](value_type const&) {} +# else + empty_erase_functor() +# endif + )) + { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onExtractMinSuccess(); + return true; + } + + m_Stat.onExtractMinRetry(); + } + } + + bool extract_max_( typename gc::Guard& gDel ) + { + position pos; + + for (;;) { + if ( !find_max_position( pos ) ) { + // The list is empty + m_Stat.onExtractMaxFailed(); + return false; + } + + node_type * pDel = pos.pCur; + + unsigned int nHeight = pDel->height(); + gDel.assign( node_traits::to_value_ptr(pDel) ); + + if ( try_remove_at( pDel, pos, +# ifdef CDS_CXX11_LAMBDA_SUPPORT + [](value_type const&) {} +# else + empty_erase_functor() +# endif + )) + { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onExtractMaxSuccess(); + return true; + } + + m_Stat.onExtractMaxRetry(); + } + } + + void increase_height( unsigned int nHeight ) + { + unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed ); + if ( nCur < nHeight ) + m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + } + //@endcond + + public: + /// Default constructor + /** + The constructor checks whether the count of guards is enough + for skip-list and may raise an exception if not. + */ + SkipListSet() + : m_Head( c_nMaxHeight ) + , m_nHeight( c_nMinHeight ) + { + static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); + + gc::check_available_guards( c_nHazardPtrCount ); + + // Barrier for head node + CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release ); + } + + /// Clears and destructs the skip-list + ~SkipListSet() + { + clear(); + } + + public: + /// Iterator type + typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator; + + /// Const iterator type + typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + iterator begin() + { + return iterator( *m_Head.head() ); + } + + /// Returns a forward const iterator addressing the first element in a set + //@{ + const_iterator begin() const + { + return const_iterator( *m_Head.head() ); + } + const_iterator cbegin() + { + return const_iterator( *m_Head.head() ); + } + //@} + + /// Returns a forward iterator that addresses the location succeeding the last element in a set. + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. 
+ //@{ + const_iterator end() const + { + return const_iterator(); + } + const_iterator cend() + { + return const_iterator(); + } + //@} + + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return insert( val, []( value_type& ) {} ); +# else + return insert( val, empty_insert_functor() ); +# endif + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success and may be passed by reference + using boost::ref + */ + template + bool insert( value_type& val, Func f ) + { + typename gc::Guard gNew; + gNew.assign( &val ); + + node_type * pNode = node_traits::to_node_ptr( val ); + scoped_node_ptr scp( pNode ); + unsigned int nHeight = pNode->height(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr(); + bool bTowerMade = false; + + position pos; + while ( true ) + { + bool bFound = find_position( val, pos, key_comparator(), true ); + if ( bFound ) { + // scoped_node_ptr deletes the node tower if we create it + if ( !bTowerMade ) + scp.release(); + + m_Stat.onInsertFailed(); + return false; + } + + if ( !bTowerOk ) { + build_node( pNode ); + nHeight = pNode->height(); + bTowerMade = + bTowerOk = true; + } + + if ( !insert_at_position( val, pNode, pos, f )) { + m_Stat.onInsertRetry(); + continue; + } + + increase_height( nHeight ); + ++m_ItemCounter; + m_Stat.onAddNode( nHeight ); + m_Stat.onInsertSuccess(); + scp.release(); + return true; + } + } + + /// Ensures that the \p val exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted into the set. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. + + The functor can change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You can pass \p func argument by value or by reference using boost::ref or cds::ref. + + Returns std::pair where \p first is \p true if operation is successfull, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the set. 
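+
+ For illustration, a minimal usage sketch (the \p my_data type, its \p nCount field and the
+ \p theSet object are hypothetical, they are not part of the library):
+ \code
+ struct my_data: public cds::intrusive::skip_list::node< cds::gc::HP >
+ {
+ std::string strKey; // key field
+ int nCount; // non-key field that the functor may change
+ };
+
+ struct ensure_functor {
+ void operator()( bool bNew, my_data& item, my_data& val )
+ {
+ if ( bNew )
+ item.nCount = 1; // the item has just been inserted; item and val are the same object
+ else
+ item.nCount += 1; // the item already exists; update only its non-key data
+ }
+ };
+
+ my_data d;
+ d.strKey = "apple";
+ std::pair<bool, bool> res = theSet.ensure( d, ensure_functor() );
+ // res.first - true if the operation is successful
+ // res.second - true if a new item has been inserted
+ \endcode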
+ */ + template + std::pair ensure( value_type& val, Func func ) + { + typename gc::Guard gNew; + gNew.assign( &val ); + + node_type * pNode = node_traits::to_node_ptr( val ); + scoped_node_ptr scp( pNode ); + unsigned int nHeight = pNode->height(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr(); + bool bTowerMade = false; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + insert_at_ensure_functor wrapper( func ); +# endif + + position pos; + while ( true ) + { + bool bFound = find_position( val, pos, key_comparator(), true ); + if ( bFound ) { + // scoped_node_ptr deletes the node tower if we create it before + if ( !bTowerMade ) + scp.release(); + + cds::unref(func)( false, *node_traits::to_value_ptr(pos.pCur), val ); + m_Stat.onEnsureExist(); + return std::make_pair( true, false ); + } + + if ( !bTowerOk ) { + build_node( pNode ); + nHeight = pNode->height(); + bTowerMade = + bTowerOk = true; + } + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { cds::unref(func)( true, item, item ); })) +# else + if ( !insert_at_position( val, pNode, pos, cds::ref(wrapper) )) +# endif + { + m_Stat.onInsertRetry(); + continue; + } + + increase_height( nHeight ); + ++m_ItemCounter; + scp.release(); + m_Stat.onAddNode( nHeight ); + m_Stat.onEnsureNew(); + return std::make_pair( true, true ); + } + } + + /// Unlinks the item \p val from the set + /** + The function searches the item \p val in the set and unlink it from the set + if it is found and is equal to \p val. + + Difference between \ref erase and \p unlink functions: \p erase finds a key + and deletes the item found. \p unlink finds an item by key and deletes it + only if \p val is an item of that set, i.e. the pointer to item found + is equal to &val . + + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + position pos; + + if ( !find_position( val, pos, key_comparator(), false ) ) { + m_Stat.onUnlinkFailed(); + return false; + } + + node_type * pDel = pos.pCur; + assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 ); + + unsigned int nHeight = pDel->height(); + typename gc::Guard gDel; + gDel.assign( node_traits::to_value_ptr(pDel) ); + + if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, +# ifdef CDS_CXX11_LAMBDA_SUPPORT + [](value_type const&) {} +# else + empty_erase_functor() +# endif + )) + { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onUnlinkSuccess(); + return true; + } + + m_Stat.onUnlinkFailed(); + return false; + } + + /// Extracts the item from the set with specified \p key + /** \anchor cds_intrusive_SkipListSet_hp_extract + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns it in \p dest parameter. + If the item with key equal to \p key is not found the function returns \p false. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + The \ref disposer specified in \p Traits class template parameter is called automatically + by garbage collector \p GC specified in class' template parameters when returned \ref guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. 
+ + Usage: + \code + typedef cds::intrusive::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp; + theList.extract( gp, 5 ); + // Deal with gp + // ... + + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + bool extract( guarded_ptr& dest, Q const& key ) + { + return extract_( dest.guard(), key, key_comparator() ); + } + + /// Extracts the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_hp_extract "extract(Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool extract_with( guarded_ptr& dest, Q const& key, Less pred ) + { + return extract_( dest.guard(), key, cds::opt::details::make_comparator_from_less() ); + } + + /// Extracts an item with minimal key from the list + /** + The function searches an item with minimal key, unlinks it, and returns the item found in \p dest parameter. + If the skip-list is empty the function returns \p false. + + @note Due the concurrent nature of the list, the function extracts nearly minimum key. + It means that the function gets leftmost item and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of list traversing. + + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC automatically when returned \ref guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::intrusive::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp; + if ( theList.extract_min( gp )) { + // Deal with gp + //... + } + // Destructor of gp releases internal HP guard + } + \endcode + */ + bool extract_min( guarded_ptr& dest) + { + return extract_min_( dest.guard() ); + } + + /// Extracts an item with maximal key from the list + /** + The function searches an item with maximal key, unlinks it, and returns the pointer to item found in \p dest parameter. + If the skip-list is empty the function returns empty \p guarded_ptr. + + @note Due the concurrent nature of the list, the function extracts nearly maximal key. + It means that the function gets rightmost item and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key. + So, the function returns the item with maximum key at the moment of list traversing. + + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously when returned \ref guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::intrusive::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp; + if ( theList.extract_max( gp )) { + // Deal with gp + //... 
+ } + // Destructor of gp releases internal HP guard + } + \endcode + */ + bool extract_max( guarded_ptr& dest ) + { + return extract_max_( dest.guard() ); + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_SkipListSet_hp_erase + The function searches an item with key equal to \p val in the set, + unlinks it from the set, and returns \p true. + If the item with key equal to \p val is not found the function return \p false. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool erase( Q const& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_( val, key_comparator(), [](value_type const&) {} ); +# else + return erase_( val, key_comparator(), empty_erase_functor() ); +# endif + } + + /// Deletes the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_hp_erase "erase(Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& val, Less pred ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_( val, cds::opt::details::make_comparator_from_less(), [](value_type const&) {} ); +# else + return erase_( val, cds::opt::details::make_comparator_from_less(), empty_erase_functor() ); +# endif + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_SkipListSet_hp_erase_func + The function searches an item with key equal to \p val in the set, + call \p f functor with item found, unlinks it from the set, and returns \p true. + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. + + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + The functor can be passed by reference with boost:ref + + If the item with key equal to \p val is not found the function return \p false. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool erase( Q const& val, Func f ) + { + return erase_( val, key_comparator(), f ); + } + + /// Deletes the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_hp_erase_func "erase(Q const&, Func)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& val, Less pred, Func f ) + { + return erase_( val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_SkipListSet_hp_find_func + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. 
Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return find_with_( val, key_comparator(), f ); + } + + /// Finds the key \p val with \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SkipListSet_hp_find_func "find(Q&, Func)" + but \p pred is used for key compare. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Less pred, Func f ) + { + return find_with_( val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_SkipListSet_hp_find_cfunc + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return find_with_( val, key_comparator(), f ); + } + + /// Finds the key \p val with \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SkipListSet_hp_find_cfunc "find(Q const&, Func)" + but \p pred is used for key compare. + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred, Func f ) + { + return find_with_( val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_SkipListSet_hp_find_val + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. 
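+
+ For illustration, a minimal usage sketch (the set object \p theSet and the \p std::string key
+ are hypothetical; the sketch assumes the comparator can compare \p value_type with \p std::string):
+ \code
+ if ( theSet.find( std::string("apple") )) {
+ // an item with key "apple" is in the set
+ }
+ \endcode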
+ */ + template + bool find( Q const & val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return find_with_( val, key_comparator(), [](value_type& , Q const& ) {} ); +# else + return find_with_( val, key_comparator(), empty_find_functor() ); +# endif + } + + /// Finds the key \p val with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_hp_find_val "find(Q const&)" + but \p pred is used for comparing the keys. + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return find_with_( val, cds::opt::details::make_comparator_from_less(), [](value_type& , Q const& ) {} ); +# else + return find_with_( val, cds::opt::details::make_comparator_from_less(), empty_find_functor() ); +# endif + } + + /// Finds the key \p val and return the item found + /** \anchor cds_intrusive_SkipListSet_hp_get + The function searches the item with key equal to \p val + and assigns the item found to guarded pointer \p ptr. + The function returns \p true if \p val is found, and \p false otherwise. + If \p val is not found the \p ptr parameter is not changed. + + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously when returned \ref guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::intrusive::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp; + if ( theList.get( gp, 5 )) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool get( guarded_ptr& ptr, Q const& val ) + { + return get_with_( ptr.guard(), val, key_comparator() ); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_intrusive_SkipListSet_hp_get "get( guarded_ptr& ptr, Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool get_with( guarded_ptr& ptr, Q const& val, Less pred ) + { + return get_with_( ptr.guard(), val, cds::opt::details::make_comparator_from_less() ); + } + + /// Returns item count in the set + /** + The value returned depends on item counter type provided by \p Traits template parameter. + If it is atomicity::empty_item_counter this function always returns 0. + Therefore, the function is not suitable for checking the set emptiness, use \ref empty + member function for this purpose. + */ + size_t size() const + { + return m_ItemCounter; + } + + /// Checks if the set is empty + bool empty() const + { + return m_Head.head()->next(0).load( memory_model::memory_order_relaxed ) == null_ptr(); + } + + /// Clears the set (non-atomic) + /** + The function unlink all items from the set. 
+ The function is not atomic, i.e., in multi-threaded environment with parallel insertions + this sequence + \code + set.clear(); + assert( set.empty() ); + \endcode + the assertion could be raised. + + For each item the \ref disposer will be called after unlinking. + */ + void clear() + { + guarded_ptr gp; + while ( extract_min( gp )); + } + + /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32. + static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT + { + return c_nMaxHeight; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + }; + +}} // namespace cds::intrusive + + +#endif // #ifndef __CDS_INTRUSIVE_SKIP_LIST_IMPL_H diff --git a/cds/intrusive/skip_list_nogc.h b/cds/intrusive/skip_list_nogc.h new file mode 100644 index 00000000..b3d2ed41 --- /dev/null +++ b/cds/intrusive/skip_list_nogc.h @@ -0,0 +1,1034 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_SKIP_LIST_NOGC_H +#define __CDS_INTRUSIVE_SKIP_LIST_NOGC_H + +#include +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + //@cond + namespace skip_list { + template + class node< cds::gc::nogc, Tag > + { + public: + typedef cds::gc::nogc gc ; ///< Garbage collector + typedef Tag tag ; ///< tag + + typedef CDS_ATOMIC::atomic atomic_ptr; + typedef atomic_ptr tower_item_type; + + protected: + atomic_ptr m_pNext ; ///< Next item in bottom-list (list at level 0) + unsigned int m_nHeight ; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1. + atomic_ptr * m_arrNext ; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p NULL + + public: + /// Constructs a node of height 1 (a bottom-list node) + node() + : m_pNext( null_ptr()) + , m_nHeight(1) + , m_arrNext( null_ptr()) + {} + + /// Constructs a node of height \p nHeight + void make_tower( unsigned int nHeight, atomic_ptr * nextTower ) + { + assert( nHeight > 0 ); + assert( ( nHeight == 1 && nextTower == null_ptr() ) // bottom-list node + || ( nHeight > 1 && nextTower != null_ptr() ) // node at level of more than 0 + ); + + m_arrNext = nextTower; + m_nHeight = nHeight; + } + + atomic_ptr * release_tower() + { + atomic_ptr * pTower = m_arrNext; + m_arrNext = null_ptr(); + m_nHeight = 1; + return pTower; + } + + atomic_ptr * get_tower() const + { + return m_arrNext; + } + + /// Access to element of next pointer array + atomic_ptr& next( unsigned int nLevel ) + { + assert( nLevel < height() ); + assert( nLevel == 0 || (nLevel > 0 && m_arrNext != null_ptr() )); + + return nLevel ? m_arrNext[ nLevel - 1] : m_pNext; + } + + /// Access to element of next pointer array (const version) + atomic_ptr const& next( unsigned int nLevel ) const + { + assert( nLevel < height() ); + assert( nLevel == 0 || nLevel > 0 && m_arrNext != null_ptr() ); + + return nLevel ? 
m_arrNext[ nLevel - 1] : m_pNext; + } + + /// Access to element of next pointer array (same as \ref next function) + atomic_ptr& operator[]( unsigned int nLevel ) + { + return next( nLevel ); + } + + /// Access to element of next pointer array (same as \ref next function) + atomic_ptr const& operator[]( unsigned int nLevel ) const + { + return next( nLevel ); + } + + /// Height of the node + unsigned int height() const + { + return m_nHeight; + } + + /// Clears internal links + void clear() + { + assert( m_arrNext == null_ptr()); + m_pNext.store( null_ptr(), CDS_ATOMIC::memory_order_release ); + } + + bool is_cleared() const + { + return m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == null_ptr() + && m_arrNext == null_ptr() + && m_nHeight <= 1 +; + } + }; + } // namespace skip_list + + namespace skip_list { namespace details { + + template + class iterator< cds::gc::nogc, NodeTraits, BackOff, IsConst> + { + public: + typedef cds::gc::nogc gc; + typedef NodeTraits node_traits; + typedef BackOff back_off; + typedef typename node_traits::node_type node_type; + typedef typename node_traits::value_type value_type; + static bool const c_isConst = IsConst; + + typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref; + + protected: + typedef typename node_type::atomic_ptr atomic_ptr; + node_type * m_pNode; + + public: // for internal use only!!! + iterator( node_type& refHead ) + : m_pNode( refHead[0].load( CDS_ATOMIC::memory_order_relaxed ) ) + {} + + static iterator from_node( node_type * pNode ) + { + iterator it; + it.m_pNode = pNode; + return it; + } + + public: + iterator() + : m_pNode( null_ptr()) + {} + + iterator( iterator const& s) + : m_pNode( s.m_pNode ) + {} + + value_type * operator ->() const + { + assert( m_pNode != null_ptr< node_type *>() ); + assert( node_traits::to_value_ptr( m_pNode ) != null_ptr() ); + + return node_traits::to_value_ptr( m_pNode ); + } + + value_ref operator *() const + { + assert( m_pNode != null_ptr< node_type *>() ); + assert( node_traits::to_value_ptr( m_pNode ) != null_ptr() ); + + return *node_traits::to_value_ptr( m_pNode ); + } + + /// Pre-increment + iterator& operator ++() + { + if ( m_pNode ) + m_pNode = m_pNode->next(0).load( CDS_ATOMIC::memory_order_relaxed ); + return *this; + } + + iterator& operator = (const iterator& src) + { + m_pNode = src.m_pNode; + return *this; + } + + template + bool operator ==(iterator const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator const& i ) const + { + return !( *this == i ); + } + }; + }} // namespace skip_list::details + //@endcond + + /// Lock-free skip-list set (template specialization for gc::nogc) + /** @ingroup cds_intrusive_map + @anchor cds_intrusive_SkipListSet_nogc + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of list item. + + See \ref cds_intrusive_SkipListSet_hp "SkipListSet" for description of skip-list. + + Template arguments : + - \p T - type to be stored in the set. The type must be based on skip_list::node (for skip_list::base_hook) + or it must have a member of type skip_list::node (for skip_list::member_hook). + - \p Traits - type traits. See skip_list::type_traits for explanation. + + It is possible to declare option-based list with cds::intrusive::skip_list::make_traits metafunction istead of \p Traits template + argument. 
+ Template argument list \p Options of cds::intrusive::skip_list::make_traits metafunction are: + - opt::hook - hook used. Possible values are: skip_list::base_hook, skip_list::member_hook, skip_list::traits_hook. + If the option is not specified, skip_list::base_hook<> and gc::HP is used. + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - skip_list::random_level_generator - random level generator. Can be skip_list::xorshift, skip_list::turbo_pascal or + user-provided one. See skip_list::random_level_generator option description for explanation. + Default is \p %skip_list::turbo_pascal. + - opt::allocator - although the skip-list is an intrusive container, + an allocator should be provided to maintain variable randomly-calculated height of the node + since the node can contain up to 32 next pointers. The allocator option is used to allocate an array of next pointers + for nodes which height is more than 1. Default is \ref CDS_DEFAULT_ALLOCATOR. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::Default is used. + - opt::stat - internal statistics. Available types: skip_list::stat, skip_list::empty_stat (the default) + - opt::disposer - the functor used for dispose removed items. Default is opt::v::empty_disposer. + The disposer is used only in object destructor and in \ref clear function. + + Iterators + + The class supports a forward iterator (\ref iterator and \ref const_iterator). + The iteration is ordered. + + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced. + + How to use + + You should incorporate skip_list::node into your struct \p T and provide + appropriate skip_list::type_traits::hook in your \p Traits template parameters. Usually, for \p Traits you + define a struct based on skip_list::type_traits. + + Example for base hook: + \code + #include + + // Data stored in skip list + struct my_data: public cds::intrusive::skip_list::node< cds::gc::nogc > + { + // key field + std::string strKey; + + // other data + // ... 
+ }; + + // my_data compare functor + struct my_data_cmp { + int operator()( const my_data& d1, const my_data& d2 ) + { + return d1.strKey.compare( d2.strKey ); + } + + int operator()( const my_data& d, const std::string& s ) + { + return d.strKey.compare(s); + } + + int operator()( const std::string& s, const my_data& d ) + { + return s.compare( d.strKey ); + } + }; + + + // Declare type_traits + struct my_traits: public cds::intrusive::skip_list::type_traits + { + typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< cds::gc::nogc > > hook; + typedef my_data_cmp compare; + }; + + // Declare skip-list set type + typedef cds::intrusive::SkipListSet< cds::gc::nogc, my_data, my_traits > traits_based_set; + \endcode + + Equivalent option-based code: + \code + // GC-related specialization + #include + + struct my_data { + // see above + }; + struct compare { + // see above + }; + + // Declare option-based skip-list set + typedef cds::intrusive::SkipListSet< cds::gc::nogc + ,my_data + , typename cds::intrusive::skip_list::make_traits< + cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< cds::gc::nogc > > > + ,cds::intrusive::opt::compare< my_data_cmp > + >::type + > option_based_set; + + \endcode + + */ + template < + typename T +#ifdef CDS_DOXYGEN_INVOKED + ,typename Traits = skip_list::type_traits +#else + ,typename Traits +#endif + > + class SkipListSet< cds::gc::nogc, T, Traits > + { + public: + typedef T value_type ; ///< type of value stored in the skip-list + typedef Traits options ; ///< Traits template parameter + + typedef typename options::hook hook ; ///< hook type + typedef typename hook::node_type node_type ; ///< node type + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter. +# else + typedef typename opt::details::make_comparator< value_type, options >::type key_comparator; +# endif + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + + typedef cds::gc::nogc gc ; ///< No garbage collector is used + typedef typename options::item_counter item_counter ; ///< Item counting policy used + typedef typename options::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option + typedef typename options::random_level_generator random_level_generator ; ///< random level generator + typedef typename options::allocator allocator_type ; ///< allocator for maintaining array of next pointers of the node + typedef typename options::back_off back_off ; ///< Back-off trategy + typedef typename options::stat stat ; ///< internal statistics type + typedef typename options::disposer disposer ; ///< disposer used + + /// Max node height. The actual node height should be in range [0 .. c_nMaxHeight) + /** + The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound + but it should be no more than 32 (\ref skip_list::c_nHeightLimit). 
+ */ + static unsigned int const c_nMaxHeight = std::conditional< + (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit), + std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >, + std::integral_constant< unsigned int, skip_list::c_nHeightLimit > + >::type::value; + + //@cond + static unsigned int const c_nMinHeight = 3; + //@endcond + + protected: + typedef typename node_type::atomic_ptr atomic_node_ptr ; ///< Atomic node pointer + + protected: + //@cond + typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder; + + typedef typename std::conditional< + std::is_same< typename options::internal_node_builder, cds::opt::none >::value + ,intrusive_node_builder + ,typename options::internal_node_builder + >::type node_builder; + + typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr; + + struct position { + node_type * pPrev[ c_nMaxHeight ]; + node_type * pSucc[ c_nMaxHeight ]; + + node_type * pCur; + }; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_insert_functor { + void operator()( value_type& ) + {} + }; + + struct empty_find_functor { + template + void operator()( value_type& item, Q& val ) + {} + }; + + template + struct insert_at_ensure_functor { + Func m_func; + insert_at_ensure_functor( Func f ) : m_func(f) {} + + void operator()( value_type& item ) + { + cds::unref( m_func)( true, item, item ); + } + }; + +# endif // ifndef CDS_CXX11_LAMBDA_SUPPORT + + class head_node: public node_type + { + typename node_type::atomic_ptr m_Tower[c_nMaxHeight]; + + public: + head_node( unsigned int nHeight ) + { + for ( size_t i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i ) + m_Tower[i].store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + + node_type::make_tower( nHeight, m_Tower ); + } + + node_type * head() const + { + return const_cast( static_cast(this)); + } + + void clear() + { + for (unsigned int i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i ) + m_Tower[i].store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + node_type::m_pNext.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + } + }; + //@endcond + + protected: + head_node m_Head ; ///< head tower (max height) + + item_counter m_ItemCounter ; ///< item counter + random_level_generator m_RandomLevelGen ; ///< random level generator instance + CDS_ATOMIC::atomic m_nHeight ; ///< estimated high level + mutable stat m_Stat ; ///< internal statistics + + protected: + //@cond + unsigned int random_level() + { + // Random generator produces a number from range [0..31] + // We need a number from range [1..32] + return m_RandomLevelGen() + 1; + } + + template + node_type * build_node( Q v ) + { + return node_builder::make_tower( v, m_RandomLevelGen ); + } + + static void dispose_node( node_type * pNode ) + { + assert( pNode != null_ptr() ); + typename node_builder::node_disposer()( pNode ); + disposer()( node_traits::to_value_ptr( pNode )); + } + + template + bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound, bool bStrictSearch ) const + { + node_type * pPred; + node_type * pSucc; + node_type * pCur = null_ptr(); + + int nCmp = 1; + + unsigned int nHeight = c_nMaxHeight; + retry: + if ( !bStrictSearch ) + nHeight = m_nHeight.load( memory_model::memory_order_relaxed ); + pPred = m_Head.head(); + + for ( int nLevel = (int) nHeight - 1; nLevel >= 0; --nLevel ) { + while ( true ) { + pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed ); 
+ + if ( !pCur ) { + // end of the list at level nLevel - goto next level + break; + } + + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed ); + + if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ) != pCur + || pCur->next( nLevel ).load( memory_model::memory_order_acquire ) != pSucc ) + { + goto retry; + } + + nCmp = cmp( *node_traits::to_value_ptr( pCur ), val ); + if ( nCmp < 0 ) + pPred = pCur; + else if ( nCmp == 0 && bStopIfFound ) + goto found; + else + break; + } + + pos.pPrev[ nLevel ] = pPred; + pos.pSucc[ nLevel ] = pCur; + } + + if ( nCmp != 0 ) + return false; + + found: + pos.pCur = pCur; + return pCur && nCmp == 0; + } + + template + bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f ) + { + unsigned int nHeight = pNode->height(); + + for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) + pNode->next(nLevel).store( null_ptr(), memory_model::memory_order_relaxed ); + + { + node_type * p = pos.pSucc[0]; + pNode->next( 0 ).store( pos.pSucc[ 0 ], memory_model::memory_order_release ); + if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, pNode, memory_model::memory_order_release, memory_model::memory_order_relaxed ) ) { + return false; + } + cds::unref( f )( val ); + } + + for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) { + node_type * p = null_ptr(); + while ( true ) { + node_type * q = pos.pSucc[ nLevel ]; + + if ( pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_release, memory_model::memory_order_relaxed )) { + p = q; + if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, pNode, memory_model::memory_order_release, memory_model::memory_order_relaxed ) ) + break; + } + + // Renew insert position + find_position( val, pos, key_comparator(), false, true ); + } + } + return true; + } + + template + node_type * find_with_( Q& val, Compare cmp, Func f ) const + { + position pos; + if ( find_position( val, pos, cmp, true, false )) { + assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 ); + + cds::unref(f)( *node_traits::to_value_ptr( pos.pCur ), val ); + + m_Stat.onFindFastSuccess(); + return pos.pCur; + } + else { + m_Stat.onFindFastFailed(); + return null_ptr(); + } + } + + void increase_height( unsigned int nHeight ) + { + unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed ); + while ( nCur < nHeight && !m_nHeight.compare_exchange_weak( nCur, nHeight, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ) ); + } + //@endcond + + public: + /// Default constructor + /** + The constructor checks whether the count of guards is enough + for skip-list and may raise an exception if not. 
+ */ + SkipListSet() + : m_Head( c_nMaxHeight ) + , m_nHeight( c_nMinHeight ) + { + static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); + + // Barrier for head node + CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release ); + } + + /// Clears and destructs the skip-list + ~SkipListSet() + { + clear(); + } + + public: + /// Iterator type + typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator; + + /// Const iterator type + typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + iterator begin() + { + return iterator( *m_Head.head() ); + } + + /// Returns a forward const iterator addressing the first element in a set + //@{ + const_iterator begin() const + { + return const_iterator( *m_Head.head() ); + } + const_iterator cbegin() + { + return const_iterator( *m_Head.head() ); + } + //@} + + /// Returns a forward iterator that addresses the location succeeding the last element in a set. + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + //@{ + const_iterator end() const + { + return const_iterator(); + } + const_iterator cend() + { + return const_iterator(); + } + //@} + + protected: + //@cond + iterator nonconst_end() const + { + return iterator(); + } + //@endcond + + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + node_type * pNode = node_traits::to_node_ptr( val ); + scoped_node_ptr scp( pNode ); + unsigned int nHeight = pNode->height(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr(); + bool bTowerMade = false; + + position pos; + while ( true ) + { + bool bFound = find_position( val, pos, key_comparator(), true, true ); + if ( bFound ) { + // scoped_node_ptr deletes the node tower if we create it + if ( !bTowerMade ) + scp.release(); + + m_Stat.onInsertFailed(); + return false; + } + + if ( !bTowerOk ) { + build_node( pNode ); + nHeight = pNode->height(); + bTowerMade = + bTowerOk = true; + } + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + if ( !insert_at_position( val, pNode, pos, empty_insert_functor() )) +# else + if ( !insert_at_position( val, pNode, pos, []( value_type& ) {} )) +# endif + { + m_Stat.onInsertRetry(); + continue; + } + + increase_height( nHeight ); + ++m_ItemCounter; + m_Stat.onAddNode( nHeight ); + m_Stat.onInsertSuccess(); + scp.release(); + return true; + } + } + + /// Ensures that the \p val exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted into the set. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. 
+ + The functor can change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You can pass \p func argument by value or by reference using boost::ref or cds::ref. + + Returns std::pair where \p first is \p true if operation is successfull, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the set. + */ + template + std::pair ensure( value_type& val, Func func ) + { + node_type * pNode = node_traits::to_node_ptr( val ); + scoped_node_ptr scp( pNode ); + unsigned int nHeight = pNode->height(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr(); + bool bTowerMade = false; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + insert_at_ensure_functor wrapper( func ); +# endif + + position pos; + while ( true ) + { + bool bFound = find_position( val, pos, key_comparator(), true, true ); + if ( bFound ) { + // scoped_node_ptr deletes the node tower if we create it before + if ( !bTowerMade ) + scp.release(); + + cds::unref(func)( false, *node_traits::to_value_ptr(pos.pCur), val ); + m_Stat.onEnsureExist(); + return std::make_pair( true, false ); + } + + if ( !bTowerOk ) { + build_node( pNode ); + nHeight = pNode->height(); + bTowerMade = + bTowerOk = true; + } + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { cds::unref(func)( true, item, item ); })) +# else + if ( !insert_at_position( val, pNode, pos, cds::ref(wrapper) )) +# endif + { + m_Stat.onInsertRetry(); + continue; + } + + increase_height( nHeight ); + ++m_ItemCounter; + scp.release(); + m_Stat.onAddNode( nHeight ); + m_Stat.onEnsureNew(); + return std::make_pair( true, true ); + } + } + + /// Finds the key \p val + /** \anchor cds_intrusive_SkipListSet_nogc_find_func + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) const + { + return find_with_( val, key_comparator(), f ) != null_ptr(); + } + + /// Finds the key \p val using \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SkipListSet_nogc_find_func "find(Q&, Func)" + but \p pred predicate is used for key compare. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. 
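+
+ For illustration, a sketch of such a predicate (the \p my_data type with a \p strKey field and
+ the \p std::string key type are hypothetical); it must be able to compare \p value_type and \p Q
+ in both orders:
+ \code
+ struct my_data_less {
+ bool operator()( my_data const& lhs, std::string const& rhs ) const
+ {
+ return lhs.strKey < rhs;
+ }
+ bool operator()( std::string const& lhs, my_data const& rhs ) const
+ {
+ return lhs < rhs.strKey;
+ }
+ };
+
+ // usage (assuming a set object theSet and a functor f):
+ // std::string key("apple");
+ // bool bFound = theSet.find_with( key, my_data_less(), f );
+ \endcode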
+ */ + template + bool find_with( Q& val, Less pred, Func f ) const + { + return find_with_( val, cds::opt::details::make_comparator_from_less(), f ) != null_ptr(); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_SkipListSet_nogc_find_cfunc + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) const + { + return find_with_( val, key_comparator(), f ) != null_ptr(); + } + + /// Finds the key \p val using \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SkipListSet_nogc_find_cfunc "find(Q const&, Func)" + but \p pred predicate is used for key compare. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred, Func f ) const + { + return find_with_( val, cds::opt::details::make_comparator_from_less(), f ) != null_ptr(); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_SkipListSet_nogc_find_val + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + value_type * find( Q const& val ) const + { + node_type * pNode = +# ifdef CDS_CXX11_LAMBDA_SUPPORT + find_with_( val, key_comparator(), [](value_type& , Q const& ) {} ); +# else + find_with_( val, key_comparator(), empty_find_functor() ); +# endif + if ( pNode ) + return node_traits::to_value_ptr( pNode ); + return null_ptr(); + } + + /// Finds the key \p val using \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SkipListSet_nogc_find_val "find(Q const&)" + but \p pred predicate is used for key compare. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. 
+ */ + template + value_type * find_with( Q const& val, Less pred ) const + { + node_type * pNode = +# ifdef CDS_CXX11_LAMBDA_SUPPORT + find_with_( val, cds::opt::details::make_comparator_from_less(), [](value_type& , Q const& ) {} ); +# else + find_with_( val, cds::opt::details::make_comparator_from_less(), empty_find_functor() ); +# endif + if ( pNode ) + return node_traits::to_value_ptr( pNode ); + return null_ptr(); + } + + /// Gets minimum key from the set + /** + If the set is empty the function returns \p NULL + */ + value_type * get_min() const + { + return node_traits::to_value_ptr( m_Head.head()->next( 0 )); + } + + /// Gets maximum key from the set + /** + The function returns \p NULL if the set is empty + */ + value_type * get_max() const + { + node_type * pPred; + + unsigned int nHeight = m_nHeight.load( memory_model::memory_order_relaxed ); + pPred = m_Head.head(); + + for ( int nLevel = (int) nHeight - 1; nLevel >= 0; --nLevel ) { + while ( true ) { + node_type * pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed ); + + if ( !pCur ) { + // end of the list at level nLevel - goto next level + break; + } + pPred = pCur; + } + } + return pPred && pPred != m_Head.head() ? node_traits::to_value_ptr( pPred ) : null_ptr(); + } + + /// Clears the set (non-atomic) + /** + The function is not atomic. + Finding and/or inserting is prohibited while clearing. + Otherwise an unpredictable result may be encountered. + Thus, \p clear() may be used only for debugging purposes. + */ + void clear() + { + node_type * pNode = m_Head.head()->next(0).load( memory_model::memory_order_relaxed ); + m_Head.clear(); + m_ItemCounter.reset(); + m_nHeight.store( c_nMinHeight, memory_model::memory_order_release ); + + while ( pNode ) { + node_type * pNext = pNode->next(0).load( memory_model::memory_order_relaxed ); + dispose_node( pNode ); + pNode = pNext; + } + } + + /// Returns item count in the set + /** + The value returned depends on item counter type provided by \p Traits template parameter. + If it is atomicity::empty_item_counter this function always returns 0. + The function is not suitable for checking the set emptiness, use \ref empty + member function for this purpose. + */ + size_t size() const + { + return m_ItemCounter; + } + + /// Checks if the set is empty + bool empty() const + { + return m_Head.head()->next(0).load( memory_model::memory_order_relaxed ) == null_ptr(); + } + + /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32. 
+ static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT + { + return c_nMaxHeight; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + }; + +}} // namespace cds::intrusive + + +#endif // #ifndef __CDS_INTRUSIVE_SKIP_LIST_IMPL_H diff --git a/cds/intrusive/skip_list_ptb.h b/cds/intrusive/skip_list_ptb.h new file mode 100644 index 00000000..09acb138 --- /dev/null +++ b/cds/intrusive/skip_list_ptb.h @@ -0,0 +1,9 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_SKIP_LIST_PTB_H +#define __CDS_INTRUSIVE_SKIP_LIST_PTB_H + +#include +#include + +#endif diff --git a/cds/intrusive/skip_list_rcu.h b/cds/intrusive/skip_list_rcu.h new file mode 100644 index 00000000..e03162cc --- /dev/null +++ b/cds/intrusive/skip_list_rcu.h @@ -0,0 +1,2227 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_SKIP_LIST_RCU_H +#define __CDS_INTRUSIVE_SKIP_LIST_RCU_H + +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace cds { namespace intrusive { + + //@cond + namespace skip_list { + + template + class node< cds::urcu::gc< RCU >, Tag > + { + public: + typedef cds::urcu::gc< RCU > gc; ///< Garbage collector + typedef Tag tag ; ///< tag + + // Mark bits: + // bit 0 - the item is logically deleted + // bit 1 - the item is extracted (only for level 0) + typedef cds::details::marked_ptr marked_ptr ; ///< marked pointer + typedef CDS_ATOMIC::atomic< marked_ptr > atomic_marked_ptr ; ///< atomic marked pointer + typedef atomic_marked_ptr tower_item_type; + + protected: + atomic_marked_ptr m_pNext ; ///< Next item in bottom-list (list at level 0) + public: + node * m_pDelChain ; ///< Deleted node chain (local for a thread) +# ifdef _DEBUG + bool volatile m_bLinked; + bool volatile m_bUnlinked; +# endif + protected: + unsigned int m_nHeight ; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1. + atomic_marked_ptr * m_arrNext ; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p NULL + + public: + /// Constructs a node of height 1 (a bottom-list node) + node() + : m_pNext( null_ptr()) + , m_pDelChain( null_ptr()) +# ifdef _DEBUG + , m_bLinked( false ) + , m_bUnlinked( false ) +# endif + , m_nHeight(1) + , m_arrNext( null_ptr()) + {} + +# ifdef _DEBUG + ~node() + { + assert( !m_bLinked || m_bUnlinked ); + } +# endif + + /// Constructs a node of height \p nHeight + void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower ) + { + assert( nHeight > 0 ); + assert( ( nHeight == 1 && nextTower == null_ptr() ) // bottom-list node + || ( nHeight > 1 && nextTower != null_ptr() ) // node at level of more than 0 + ); + + m_arrNext = nextTower; + m_nHeight = nHeight; + } + + atomic_marked_ptr * release_tower() + { + atomic_marked_ptr * pTower = m_arrNext; + m_arrNext = null_ptr(); + m_nHeight = 1; + return pTower; + } + + atomic_marked_ptr * get_tower() const + { + return m_arrNext; + } + + void clear_tower() + { + for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel ) + next(nLevel).store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed ); + } + + /// Access to element of next pointer array + atomic_marked_ptr& next( unsigned int nLevel ) + { + assert( nLevel < height() ); + assert( nLevel == 0 || (nLevel > 0 && m_arrNext != null_ptr() )); + + return nLevel ? 
m_arrNext[ nLevel - 1] : m_pNext; + } + + /// Access to element of next pointer array (const version) + atomic_marked_ptr const& next( unsigned int nLevel ) const + { + assert( nLevel < height() ); + assert( nLevel == 0 || nLevel > 0 && m_arrNext != null_ptr() ); + + return nLevel ? m_arrNext[ nLevel - 1] : m_pNext; + } + + /// Access to element of next pointer array (same as \ref next function) + atomic_marked_ptr& operator[]( unsigned int nLevel ) + { + return next( nLevel ); + } + + /// Access to element of next pointer array (same as \ref next function) + atomic_marked_ptr const& operator[]( unsigned int nLevel ) const + { + return next( nLevel ); + } + + /// Height of the node + unsigned int height() const + { + return m_nHeight; + } + + /// Clears internal links + void clear() + { + assert( m_arrNext == null_ptr()); + m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_release ); + m_pDelChain = null_ptr(); + } + + bool is_cleared() const + { + return m_pNext == atomic_marked_ptr() + && m_arrNext == null_ptr() + && m_nHeight <= 1; + } + }; + } // namespace skip_list + //@endcond + + //@cond + namespace skip_list { namespace details { + + template + class iterator< cds::urcu::gc< RCU >, NodeTraits, BackOff, IsConst > + { + public: + typedef cds::urcu::gc< RCU > gc; + typedef NodeTraits node_traits; + typedef BackOff back_off; + typedef typename node_traits::node_type node_type; + typedef typename node_traits::value_type value_type; + static bool const c_isConst = IsConst; + + typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref; + + protected: + typedef typename node_type::marked_ptr marked_ptr; + typedef typename node_type::atomic_marked_ptr atomic_marked_ptr; + + node_type * m_pNode; + + protected: + void next() + { + // RCU should be locked before iterating!!! + assert( gc::is_locked() ); + + back_off bkoff; + + for (;;) { + if ( m_pNode->next( m_pNode->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) { + // Current node is marked as deleted. So, its next pointer can point to anything + // In this case we interrupt our iteration and returns end() iterator. + *this = iterator(); + return; + } + + marked_ptr p = m_pNode->next(0).load( CDS_ATOMIC::memory_order_relaxed ); + node_type * pp = p.ptr(); + if ( p.bits() ) { + // p is marked as deleted. Spin waiting for physical removal + bkoff(); + continue; + } + else if ( pp && pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_relaxed ).bits() ) { + // p is marked as deleted. Spin waiting for physical removal + bkoff(); + continue; + } + + m_pNode = pp; + break; + } + } + + public: // for internal use only!!! + iterator( node_type& refHead ) + : m_pNode( null_ptr() ) + { + // RCU should be locked before iterating!!! + assert( gc::is_locked() ); + + back_off bkoff; + + for (;;) { + marked_ptr p = refHead.next(0).load( CDS_ATOMIC::memory_order_relaxed ); + if ( !p.ptr() ) { + // empty skip-list + break; + } + + node_type * pp = p.ptr(); + // Logically deleted node is marked from highest level + if ( !pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) { + m_pNode = pp; + break; + } + + bkoff(); + } + } + + public: + iterator() + : m_pNode( null_ptr()) + { + // RCU should be locked before iterating!!! + assert( gc::is_locked() ); + } + + iterator( iterator const& s) + : m_pNode( s.m_pNode ) + { + // RCU should be locked before iterating!!! 
+ assert( gc::is_locked() ); + } + + value_type * operator ->() const + { + assert( m_pNode != null_ptr< node_type *>() ); + assert( node_traits::to_value_ptr( m_pNode ) != null_ptr() ); + + return node_traits::to_value_ptr( m_pNode ); + } + + value_ref operator *() const + { + assert( m_pNode != null_ptr< node_type *>() ); + assert( node_traits::to_value_ptr( m_pNode ) != null_ptr() ); + + return *node_traits::to_value_ptr( m_pNode ); + } + + /// Pre-increment + iterator& operator ++() + { + next(); + return *this; + } + + iterator& operator = (const iterator& src) + { + m_pNode = src.m_pNode; + return *this; + } + + template + bool operator ==(iterator const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator const& i ) const + { + return !( *this == i ); + } + }; + }} // namespace skip_list::details + //@endcond + + /// Lock-free skip-list set (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_intrusive_map + @anchor cds_intrusive_SkipListSet_rcu + + The implementation of well-known probabilistic data structure called skip-list + invented by W.Pugh in his papers: + - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees + - [1990] W.Pugh A Skip List Cookbook + + A skip-list is a probabilistic data structure that provides expected logarithmic + time search without the need of rebalance. The skip-list is a collection of sorted + linked list. Nodes are ordered by key. Each node is linked into a subset of the lists. + Each list has a level, ranging from 0 to 32. The bottom-level list contains + all the nodes, and each higher-level list is a sublist of the lower-level lists. + Each node is created with a random top level (with a random height), and belongs + to all lists up to that level. The probability that a node has the height 1 is 1/2. + The probability that a node has the height N is 1/2 ** N (more precisely, + the distribution depends on an random generator provided, but our generators + have this property). + + The lock-free variant of skip-list is implemented according to book + - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming", + chapter 14.4 "A Lock-Free Concurrent Skiplist". + \note The algorithm described in this book cannot be directly adapted for C++ (roughly speaking, + the algo contains a lot of bugs). The \b libcds implementation applies the approach discovered + by M.Michael in his \ref cds_intrusive_MichaelList_hp "lock-free linked list". + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p T - type to be stored in the list. The type must be based on \p skip_list::node (for \p skip_list::base_hook) + or it must have a member of type \p skip_list::node (for \p skip_list::member_hook). + - \p Traits - type traits. See \p skip_list::type_traits (the default) for explanation. + + It is possible to declare option-based list with \p cds::intrusive::skip_list::make_traits metafunction instead of \p Traits template + argument. + Template argument list \p Options of \p %cds::intrusive::skip_list::make_traits metafunction is: + - \p opt::hook - hook used. Possible values are: \p skip_list::base_hook, \p skip_list::member_hook, \p skip_list::traits_hook. + If the option is not specified, skip_list::base_hook<> is used. + - \p opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the \p opt::less is used. + - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. 
+ - \p opt::disposer - the functor used for dispose removed items. Default is \p opt::v::empty_disposer. Due the nature + of GC schema the disposer may be called asynchronously. + - \p opt::item_counter - the type of item counting feature. Default is \p atomicity::empty_item_counter that is no item counting. + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + - \p skip_list::random_level_generator - random level generator. Can be \p skip_list::xorshift, \p skip_list::turbo_pascal or + user-provided one. See \p skip_list::random_level_generator option description for explanation. + Default is \p %skip_list::turbo_pascal. + - \p opt::allocator - although the skip-list is an intrusive container, + an allocator should be provided to maintain variable randomly-calculated height of the node + since the node can contain up to 32 next pointers. The allocator option is used to allocate an array of next pointers + for nodes which height is more than 1. Default is \ref CDS_DEFAULT_ALLOCATOR. + - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. + - \p opt::stat - internal statistics. Available types: \p skip_list::stat, \p skip_list::empty_stat (the default) + - \p opt::rcu_check_deadlock - a deadlock checking policy. Default is \p opt::v::rcu_throw_deadlock + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + + Iterators + + The class supports a forward iterator (\ref iterator and \ref const_iterator). + The iteration is ordered. + + You may iterate over skip-list set items only under RCU lock. + Only in this case the iterator is thread-safe since + while RCU is locked any set's item cannot be reclaimed. + + @note The requirement of RCU lock during iterating means that any type of modification of the skip list + (i.e. inserting, erasing and so on) is not possible. + + @warning The iterator object cannot be passed between threads. + + Example how to use skip-list set iterators: + \code + // First, you should include the header for RCU type you have chosen + #include + #include + + typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type; + + struct Foo { + // ... + }; + + // Traits for your skip-list. + // At least, you should define cds::opt::less or cds::opt::compare for Foo struct + struct my_traits: public cds::intrusive::skip_list::type_traits + { + // ... + }; + typedef cds::intrusive::SkipListSet< rcu_type, Foo, my_traits > my_skiplist_set; + + my_skiplist_set theSet; + + // ... + + // Begin iteration + { + // Apply RCU locking manually + typename rcu_type::scoped_lock sl; + + for ( auto it = theList.begin(); it != theList.end(); ++it ) { + // ... 
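+                // While the RCU lock is held, the item referenced by the iterator
+                // cannot be reclaimed, so reading its fields here is safe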
+ } + + // rcu_type::scoped_lock destructor releases RCU lock implicitly + } + \endcode + + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced. + + How to use + + You should incorporate skip_list::node into your struct \p T and provide + appropriate skip_list::type_traits::hook in your \p Traits template parameters. Usually, for \p Traits you + define a struct based on \p skip_list::type_traits. + + Example for cds::urcu::general_buffered<> RCU and base hook: + \code + // First, you should include the header for RCU type you have chosen + #include + + // Include RCU skip-list specialization + #include + + // RCU type typedef + typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type; + + // Data stored in skip list + struct my_data: public cds::intrusive::skip_list::node< rcu_type > + { + // key field + std::string strKey; + + // other data + // ... + }; + + // my_data compare functor + struct my_data_cmp { + int operator()( const my_data& d1, const my_data& d2 ) + { + return d1.strKey.compare( d2.strKey ); + } + + int operator()( const my_data& d, const std::string& s ) + { + return d.strKey.compare(s); + } + + int operator()( const std::string& s, const my_data& d ) + { + return s.compare( d.strKey ); + } + }; + + + // Declare type_traits + struct my_traits: public cds::intrusive::skip_list::type_traits + { + typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > hook; + typedef my_data_cmp compare; + }; + + // Declare skip-list set type + typedef cds::intrusive::SkipListSet< rcu_type, my_data, my_traits > traits_based_set; + \endcode + + Equivalent option-based code: + \code + #include + #include + + typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type; + + struct my_data { + // see above + }; + struct compare { + // see above + }; + + // Declare option-based skip-list set + typedef cds::intrusive::SkipListSet< rcu_type + ,my_data + , typename cds::intrusive::skip_list::make_traits< + cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > > + ,cds::intrusive::opt::compare< my_data_cmp > + >::type + > option_based_set; + + \endcode + */ + template < + class RCU + ,typename T +#ifdef CDS_DOXYGEN_INVOKED + ,typename Traits = skip_list::type_traits +#else + ,typename Traits +#endif + > + class SkipListSet< cds::urcu::gc< RCU >, T, Traits > + { + public: + typedef T value_type ; ///< type of value stored in the skip-list + typedef Traits options ; ///< Traits template parameter + + typedef typename options::hook hook ; ///< hook type + typedef typename hook::node_type node_type ; ///< node type + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter. 
+# else + typedef typename opt::details::make_comparator< value_type, options >::type key_comparator; +# endif + + typedef typename options::disposer disposer ; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + + typedef cds::urcu::gc< RCU > gc ; ///< Garbage collector + typedef typename options::item_counter item_counter; ///< Item counting policy used + typedef typename options::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option + typedef typename options::random_level_generator random_level_generator ; ///< random level generator + typedef typename options::allocator allocator_type ; ///< allocator for maintaining array of next pointers of the node + typedef typename options::back_off back_off ; ///< Back-off trategy + typedef typename options::stat stat ; ///< internal statistics type + typedef typename options::rcu_check_deadlock rcu_check_deadlock ; ///< Deadlock checking policy + typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock + static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking + + + /// Max node height. The actual node height should be in range [0 .. c_nMaxHeight) + /** + The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound + but it should be no more than 32 (\ref skip_list::c_nHeightLimit). + */ + static unsigned int const c_nMaxHeight = std::conditional< + (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit), + std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >, + std::integral_constant< unsigned int, skip_list::c_nHeightLimit > + >::type::value; + + //@cond + static unsigned int const c_nMinHeight = 5; + //@endcond + + protected: + typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic marked node pointer + typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer + + protected: + //@cond + typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder; + + typedef typename std::conditional< + std::is_same< typename options::internal_node_builder, cds::opt::none >::value + ,intrusive_node_builder + ,typename options::internal_node_builder + >::type node_builder; + + typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr; + + struct position { + node_type * pPrev[ c_nMaxHeight ]; + node_type * pSucc[ c_nMaxHeight ]; + node_type * pNext[ c_nMaxHeight ]; + + node_type * pCur; + node_type * pDelChain; + + position() + : pDelChain( null_ptr()) + {} +# ifdef _DEBUG + ~position() + { + assert( pDelChain == null_ptr()); + } +# endif + }; + + typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_insert_functor { + void operator()( value_type& ) + {} + }; + + struct empty_erase_functor { + void operator()( value_type const& ) + {} + }; + + struct empty_find_functor { + template + void operator()( value_type& item, Q& val ) + {} + }; + + struct get_functor { + value_type * pFound; + + template + void operator()( value_type& item, Q& val ) + { + pFound = &item; + } + }; + + template + struct insert_at_ensure_functor { + Func m_func; + insert_at_ensure_functor( Func f ) : m_func(f) {} + + void operator()( value_type& item ) + { + cds::unref( 
m_func)( true, item, item ); + } + }; + + struct copy_value_functor { + template + void operator()( Q& dest, value_type const& src ) const + { + dest = src; + } + }; + +# endif // ifndef CDS_CXX11_LAMBDA_SUPPORT + + //@endcond + + protected: + skip_list::details::head_node< node_type > m_Head ; ///< head tower (max height) + + item_counter m_ItemCounter ; ///< item counter + random_level_generator m_RandomLevelGen ; ///< random level generator instance + CDS_ATOMIC::atomic m_nHeight ; ///< estimated high level + CDS_ATOMIC::atomic m_pDeferredDelChain ; ///< Deferred deleted node chain + mutable stat m_Stat ; ///< internal statistics + + protected: + //@cond + unsigned int random_level() + { + // Random generator produces a number from range [0..31] + // We need a number from range [1..32] + return m_RandomLevelGen() + 1; + } + + template + node_type * build_node( Q v ) + { + return node_builder::make_tower( v, m_RandomLevelGen ); + } + + static void dispose_node( value_type * pVal ) + { + assert( pVal != NULL ); + + typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) ); + disposer()( pVal ); + } + + struct node_disposer + { + void operator()( value_type * pVal ) + { + dispose_node( pVal ); + } + }; + //@endcond + + public: + typedef cds::urcu::exempt_ptr< gc, value_type, value_type, node_disposer, void > exempt_ptr ; ///< pointer to extracted node + + protected: + //@cond + + bool is_extracted( marked_node_ptr const p ) const + { + return (p.bits() & 2) != 0; + } + + template + bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound ) + { + assert( gc::is_locked() ); + + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; + int nCmp = 1; + + retry: + pPred = m_Head.head(); + + for ( int nLevel = static_cast(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) { + + while ( true ) { + pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed ); + if ( pCur.bits() ) { + // pCur.bits() means that pPred is logically deleted + goto retry; + } + + if ( pCur.ptr() == null_ptr()) { + // end of the list at level nLevel - goto next level + break; + } + + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed ); + + if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() ) + goto retry; + + if ( pSucc.bits() ) { + // pCur is marked, i.e. logically deleted. 
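+                        // Help the remover: try to physically unlink pCur at this level
+                        // by CAS-ing pPred->next( nLevel ) from pCur to pCur's successor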
+ marked_node_ptr p( pCur.ptr() ); + if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + { + if ( nLevel == 0 ) { +# ifdef _DEBUG + pCur->m_bUnlinked = true; +# endif + + if ( !is_extracted( pSucc )) { + // We cannot free the node at this moment since RCU is locked + // Link deleted nodes to a chain to free later + link_for_remove( pos, pCur.ptr() ); + m_Stat.onEraseWhileFind(); + } + else { + m_Stat.onExtractWhileFind(); + } + } + } + goto retry; + } + else { + nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); + if ( nCmp < 0 ) + pPred = pCur.ptr(); + else if ( nCmp == 0 && bStopIfFound ) + goto found; + else + break; + } + } + + // Next level + pos.pPrev[ nLevel ] = pPred; + pos.pSucc[ nLevel ] = pCur.ptr(); + } + + if ( nCmp != 0 ) + return false; + + found: + pos.pCur = pCur.ptr(); + return pCur.ptr() && nCmp == 0; + } + + bool find_min_position( position& pos ) + { + assert( gc::is_locked() ); + + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; + + retry: + pPred = m_Head.head(); + + for ( int nLevel = static_cast(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) { + + pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed ); + // pCur.bits() means that pPred is logically deleted + // head cannot be deleted + assert( pCur.bits() == 0 ); + + if ( pCur.ptr() ) { + + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed ); + + if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() ) + goto retry; + + if ( pSucc.bits() ) { + // pCur is marked, i.e. logically deleted. + marked_node_ptr p( pCur.ptr() ); + if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + { + if ( nLevel == 0 ) { +# ifdef _DEBUG + pCur->m_bUnlinked = true; +# endif + + if ( !is_extracted( pSucc )) { + // We cannot free the node at this moment since RCU is locked + // Link deleted nodes to a chain to free later + link_for_remove( pos, pCur.ptr() ); + m_Stat.onEraseWhileFind(); + } + else { + m_Stat.onExtractWhileFind(); + } + } + } + goto retry; + } + } + + // Next level + pos.pPrev[ nLevel ] = pPred; + pos.pSucc[ nLevel ] = pCur.ptr(); + } + return (pos.pCur = pCur.ptr()) != null_ptr(); + } + + bool find_max_position( position& pos ) + { + assert( gc::is_locked() ); + + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; + +retry: + pPred = m_Head.head(); + + for ( int nLevel = static_cast(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) { + + while ( true ) { + pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed ); + if ( pCur.bits() ) { + // pCur.bits() means that pPred is logically deleted + goto retry; + } + + if ( pCur.ptr() == null_ptr()) { + // end of the list at level nLevel - goto next level + break; + } + + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed ); + + if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() ) + goto retry; + + if ( pSucc.bits() ) { + // pCur is marked, i.e. logically deleted. 
+ marked_node_ptr p( pCur.ptr() ); + if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + { + if ( nLevel == 0 ) { +# ifdef _DEBUG + pCur->m_bUnlinked = true; +# endif + + if ( !is_extracted( pSucc )) { + // We cannot free the node at this moment since RCU is locked + // Link deleted nodes to a chain to free later + link_for_remove( pos, pCur.ptr() ); + m_Stat.onEraseWhileFind(); + } + else { + m_Stat.onExtractWhileFind(); + } + } + } + goto retry; + } + else { + if ( !pSucc.ptr() ) + break; + + pPred = pCur.ptr(); + } + } + + // Next level + pos.pPrev[ nLevel ] = pPred; + pos.pSucc[ nLevel ] = pCur.ptr(); + } + + return (pos.pCur = pCur.ptr()) != null_ptr(); + } + + template + bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f ) + { + assert( gc::is_locked() ); + + unsigned int nHeight = pNode->height(); + pNode->clear_tower(); + + { + marked_node_ptr p( pos.pSucc[0] ); + pNode->next( 0 ).store( p, memory_model::memory_order_release ); + if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { + return false; + } +# ifdef _DEBUG + pNode->m_bLinked = true; +# endif + cds::unref( f )( val ); + } + + for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) { + marked_node_ptr p; + while ( true ) { + marked_node_ptr q( pos.pSucc[ nLevel ]); + if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) { + // pNode has been marked as removed while we are inserting it + // Stop inserting + assert( p.bits() ); + m_Stat.onLogicDeleteWhileInsert(); + return true; + } + p = q; + if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) + break; + + // Renew insert position + m_Stat.onRenewInsertPosition(); + if ( !find_position( val, pos, key_comparator(), false )) { + // The node has been deleted while we are inserting it + m_Stat.onNotFoundWhileInsert(); + return true; + } + } + } + return true; + } + + static void link_for_remove( position& pos, node_type * pDel ) + { + assert( pDel->m_pDelChain == null_ptr() ); + + pDel->m_pDelChain = pos.pDelChain; + pos.pDelChain = pDel; + } + + template + bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract ) + { + assert( pDel != null_ptr()); + assert( gc::is_locked() ); + + marked_node_ptr pSucc; + + // logical deletion (marking) + for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) { + pSucc = pDel->next(nLevel).load( memory_model::memory_order_relaxed ); + while ( true ) { + if ( pSucc.bits() + || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + { + break; + } + } + } + + pSucc = pDel->next(0).load( memory_model::memory_order_relaxed ); + while ( true ) { + if ( pSucc.bits() ) + return false; + + int const nMask = bExtract ? 
3 : 1; + if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + { + cds::unref(f)( *node_traits::to_value_ptr( pDel )); + + // physical deletion + // try fast erase + pSucc = pDel; + for ( int nLevel = static_cast( pDel->height() - 1 ); nLevel >= 0; --nLevel ) { + if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( pSucc, + marked_node_ptr( pDel->next(nLevel).load(memory_model::memory_order_relaxed).ptr() ), + memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed) ) + { + // Do slow erase + find_position( *node_traits::to_value_ptr(pDel), pos, key_comparator(), false ); + if ( bExtract ) + m_Stat.onSlowExtract(); + else + m_Stat.onSlowErase(); +# ifdef _DEBUG + assert( pDel->m_bUnlinked ); +# endif + return true; + } + } + +# ifdef _DEBUG + pDel->m_bUnlinked = true; +# endif + if ( !bExtract ) { + // We cannot free the node at this moment since RCU is locked + // Link deleted nodes to a chain to free later + link_for_remove( pos, pDel ); + m_Stat.onFastErase(); + } + else + m_Stat.onFastExtract(); + + return true; + } + } + } + + enum finsd_fastpath_result { + find_fastpath_found, + find_fastpath_not_found, + find_fastpath_abort + }; + template + finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const + { + node_type * pPred; + marked_node_ptr pCur; + marked_node_ptr pSucc; + marked_node_ptr pNull; + + back_off bkoff; + + pPred = m_Head.head(); + for ( int nLevel = static_cast(m_nHeight.load(memory_model::memory_order_relaxed) - 1); nLevel >= 0; --nLevel ) { + pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire ); + if ( pCur == pNull ) + continue; + + while ( pCur != pNull ) { + if ( pCur.bits() ) { + // Wait until pCur is removed + unsigned int nAttempt = 0; + while ( pCur.bits() && nAttempt++ < 16 ) { + bkoff(); + pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire ); + } + bkoff.reset(); + + if ( pCur.bits() ) { + // Maybe, we are on deleted node sequence + // Abort searching, try slow-path + return find_fastpath_abort; + } + } + + if ( pCur.ptr() ) { + int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val ); + if ( nCmp < 0 ) { + pPred = pCur.ptr(); + pCur = pCur->next(nLevel).load( memory_model::memory_order_acquire ); + } + else if ( nCmp == 0 ) { + // found + cds::unref(f)( *node_traits::to_value_ptr( pCur.ptr() ), val ); + return find_fastpath_found; + } + else // pCur > val - go down + break; + } + } + } + + return find_fastpath_not_found; + } + + template + bool find_slowpath( Q& val, Compare cmp, Func f, position& pos ) + { + if ( find_position( val, pos, cmp, true )) { + assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 ); + + cds::unref(f)( *node_traits::to_value_ptr( pos.pCur ), val ); + return true; + } + else + return false; + } + + template + bool do_find_with( Q& val, Compare cmp, Func f ) + { + position pos; + bool bRet; + + rcu_lock l; + + switch ( find_fastpath( val, cmp, f )) { + case find_fastpath_found: + m_Stat.onFindFastSuccess(); + return true; + case find_fastpath_not_found: + m_Stat.onFindFastFailed(); + return false; + default: + break; + } + + if ( find_slowpath( val, cmp, f, pos )) { + m_Stat.onFindSlowSuccess(); + bRet = true; + } + else { + m_Stat.onFindSlowFailed(); + bRet = false; + } + + defer_chain( pos ); + + return bRet; + } + + template + bool do_erase( Q const& val, Compare cmp, Func f ) + { + check_deadlock_policy::check(); + + position pos; + bool bRet; + + { + 
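+                // The search-and-unlink sequence below runs under the RCU lock;
+                // nodes collected in pos.pDelChain are reclaimed by dispose_chain() after unlocking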
rcu_lock rcuLock; + + if ( !find_position( val, pos, cmp, false ) ) { + m_Stat.onEraseFailed(); + bRet = false; + } + else { + node_type * pDel = pos.pCur; + assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 ); + + unsigned int nHeight = pDel->height(); + if ( try_remove_at( pDel, pos, f, false )) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onEraseSuccess(); + bRet = true; + } + else { + m_Stat.onEraseFailed(); + bRet = false; + } + } + } + + dispose_chain( pos ); + return bRet; + } + + template + value_type * do_extract_key( Q const& key, Compare cmp ) + { + // RCU should be locked!!! + assert( gc::is_locked() ); + + position pos; + node_type * pDel; + + if ( !find_position( key, pos, cmp, false ) ) { + m_Stat.onExtractFailed(); + pDel = null_ptr(); + } + else { + pDel = pos.pCur; + assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 ); + + unsigned int const nHeight = pDel->height(); + + if ( try_remove_at( pDel, pos, +# ifdef CDS_CXX11_LAMBDA_SUPPORT + [](value_type const&) {} +# else + empty_erase_functor() +# endif + , true )) + { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onExtractSuccess(); + } + else { + m_Stat.onExtractFailed(); + pDel = null_ptr(); + } + } + + defer_chain( pos ); + return pDel ? node_traits::to_value_ptr(pDel) : null_ptr(); + } + + template + bool do_extract( ExemptPtr& result, Q const& key ) + { + check_deadlock_policy::check(); + + bool bReturn; + { + rcu_lock l; + value_type * pDel = do_extract_key( key, key_comparator() ); + bReturn = pDel != null_ptr(); + if ( bReturn ) + result = pDel; + } + + dispose_deferred(); + return bReturn; + } + + template + bool do_extract_with( ExemptPtr& result, Q const& key, Less pred ) + { + check_deadlock_policy::check(); + + bool bReturn; + { + rcu_lock l; + value_type * pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less() ); + bReturn = pDel != null_ptr(); + if ( bReturn ) + result = pDel; + } + + dispose_deferred(); + return bReturn; + } + + node_type * do_extract_min() + { + assert( gc::is_locked() ); + + position pos; + node_type * pDel; + + if ( !find_min_position( pos ) ) { + m_Stat.onExtractMinFailed(); + pDel = null_ptr(); + } + else { + pDel = pos.pCur; + unsigned int const nHeight = pDel->height(); + + if ( try_remove_at( pDel, pos, +# ifdef CDS_CXX11_LAMBDA_SUPPORT + [](value_type const&) {} +# else + empty_erase_functor() +# endif + , true )) + { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onExtractMinSuccess(); + } + else { + m_Stat.onExtractMinFailed(); + pDel = null_ptr(); + } + } + + defer_chain( pos ); + return pDel; + } + + template + bool do_extract_min( ExemptPtr& result ) + { + check_deadlock_policy::check(); + + bool bReturn; + { + rcu_lock l; + node_type * pDel = do_extract_min(); + bReturn = pDel != null_ptr(); + if ( bReturn ) + result = node_traits::to_value_ptr(pDel); + } + + dispose_deferred(); + return bReturn; + } + + node_type * do_extract_max() + { + assert( gc::is_locked() ); + + position pos; + node_type * pDel; + + if ( !find_max_position( pos ) ) { + m_Stat.onExtractMaxFailed(); + pDel = null_ptr(); + } + else { + pDel = pos.pCur; + unsigned int const nHeight = pDel->height(); + + if ( try_remove_at( pDel, pos, +# ifdef CDS_CXX11_LAMBDA_SUPPORT + [](value_type const&) {} +# else + empty_erase_functor() +# endif + , true )) + { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onExtractMaxSuccess(); + } + else { + m_Stat.onExtractMaxFailed(); + pDel = null_ptr(); + } + } + + 
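+            // RCU is locked by the caller, so the removed node cannot be freed yet;
+            // move pending deletions to the deferred chain for later reclamation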
defer_chain( pos ); + return pDel; + } + + template + bool do_extract_max( ExemptPtr& result ) + { + check_deadlock_policy::check(); + + bool bReturn; + { + rcu_lock l; + node_type * pDel = do_extract_max(); + bReturn = pDel != null_ptr(); + if ( bReturn ) + result = node_traits::to_value_ptr(pDel); + } + + dispose_deferred(); + return bReturn; + } + + void increase_height( unsigned int nHeight ) + { + unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed ); + if ( nCur < nHeight ) + m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + } + + class deferred_list_iterator + { + node_type * pCur; + public: + explicit deferred_list_iterator( node_type * p ) + : pCur(p) + {} + deferred_list_iterator() + : pCur( null_ptr()) + {} + + cds::urcu::retired_ptr operator *() const + { + return cds::urcu::retired_ptr( node_traits::to_value_ptr(pCur), dispose_node ); + } + + void operator ++() + { + pCur = pCur->m_pDelChain; + } + + bool operator ==( deferred_list_iterator const& i ) const + { + return pCur == i.pCur; + } + bool operator !=( deferred_list_iterator const& i ) const + { + return !operator ==( i ); + } + }; + + void dispose_chain( node_type * pHead ) + { + // RCU should NOT be locked + check_deadlock_policy::check(); + + gc::batch_retire( deferred_list_iterator( pHead ), deferred_list_iterator() ); + } + + void dispose_chain( position& pos ) + { + // RCU should NOT be locked + check_deadlock_policy::check(); + + // Delete local chain + if ( pos.pDelChain ) { + dispose_chain( pos.pDelChain ); + pos.pDelChain = null_ptr(); + } + + // Delete deferred chain + dispose_deferred(); + } + + void dispose_deferred() + { + dispose_chain( m_pDeferredDelChain.exchange( null_ptr(), memory_model::memory_order_acq_rel )); + } + + void defer_chain( position& pos ) + { + if ( pos.pDelChain ) { + node_type * pHead = pos.pDelChain; + node_type * pTail = pHead; + while ( pTail->m_pDelChain ) + pTail = pTail->m_pDelChain; + + node_type * pDeferList = m_pDeferredDelChain.load( memory_model::memory_order_relaxed ); + do { + pTail->m_pDelChain = pDeferList; + } while ( !m_pDeferredDelChain.compare_exchange_weak( pDeferList, pHead, memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed )); + + pos.pDelChain = null_ptr(); + } + } + + //@endcond + + public: + /// Default constructor + SkipListSet() + : m_Head( c_nMaxHeight ) + , m_nHeight( c_nMinHeight ) + , m_pDeferredDelChain( null_ptr() ) + { + static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); + + // Barrier for head node + CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release ); + } + + /// Clears and destructs the skip-list + ~SkipListSet() + { + clear(); + } + + public: + /// Iterator type + typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator; + + /// Const iterator type + typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + iterator begin() + { + return iterator( *m_Head.head() ); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return const_iterator( *m_Head.head() ); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() + { + return const_iterator( *m_Head.head() ); + } + + /// Returns a forward iterator that addresses 
the location succeeding the last element in a set. + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator end() const + { + return const_iterator(); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator cend() + { + return const_iterator(); + } + + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + The function applies RCU lock internally. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return insert( val, []( value_type& ) {} ); +# else + return insert( val, empty_insert_functor() ); +# endif + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success and may be passed by reference + using boost::ref + + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool insert( value_type& val, Func f ) + { + check_deadlock_policy::check(); + + position pos; + bool bRet; + + { + node_type * pNode = node_traits::to_node_ptr( val ); + scoped_node_ptr scp( pNode ); + unsigned int nHeight = pNode->height(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr(); + bool bTowerMade = false; + + rcu_lock rcuLock; + + while ( true ) + { + bool bFound = find_position( val, pos, key_comparator(), true ); + if ( bFound ) { + // scoped_node_ptr deletes the node tower if we create it + if ( !bTowerMade ) + scp.release(); + + m_Stat.onInsertFailed(); + bRet = false; + break; + } + + if ( !bTowerOk ) { + build_node( pNode ); + nHeight = pNode->height(); + bTowerMade = + bTowerOk = true; + } + + if ( !insert_at_position( val, pNode, pos, f )) { + m_Stat.onInsertRetry(); + continue; + } + + increase_height( nHeight ); + ++m_ItemCounter; + m_Stat.onAddNode( nHeight ); + m_Stat.onInsertSuccess(); + scp.release(); + bRet = true; + break; + } + } + + dispose_chain( pos ); + + return bRet; + } + + /// Ensures that the \p val exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted into the set. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. 
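+
+            A minimal example of such a functor (the \p nValue field is hypothetical and stands
+            for any non-key data of your item type):
+            \code
+            struct ensure_functor {
+                void operator()( bool bNew, value_type& item, value_type& val )
+                {
+                    if ( !bNew ) {
+                        // the item already exists - copy non-key data from val into it
+                        item.nValue = val.nValue;
+                    }
+                    // if bNew is true, item and val refer to the same object - nothing to do
+                }
+            };
+            \endcode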
+ + The functor can change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You can pass \p func argument by value or by reference using boost::ref or cds::ref. + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns std::pair where \p first is \p true if operation is successfull, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the set. + */ + template + std::pair ensure( value_type& val, Func func ) + { + check_deadlock_policy::check(); + + position pos; + std::pair bRet( true, false ); + + { + node_type * pNode = node_traits::to_node_ptr( val ); + scoped_node_ptr scp( pNode ); + unsigned int nHeight = pNode->height(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr(); + bool bTowerMade = false; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + insert_at_ensure_functor wrapper( func ); +# endif + + rcu_lock rcuLock; + while ( true ) + { + bool bFound = find_position( val, pos, key_comparator(), true ); + if ( bFound ) { + // scoped_node_ptr deletes the node tower if we create it before + if ( !bTowerMade ) + scp.release(); + + cds::unref(func)( false, *node_traits::to_value_ptr(pos.pCur), val ); + m_Stat.onEnsureExist(); + break; + } + + if ( !bTowerOk ) { + build_node( pNode ); + nHeight = pNode->height(); + bTowerMade = + bTowerOk = true; + } + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { cds::unref(func)( true, item, item ); })) +# else + if ( !insert_at_position( val, pNode, pos, cds::ref(wrapper) )) +# endif + { + m_Stat.onInsertRetry(); + continue; + } + + increase_height( nHeight ); + ++m_ItemCounter; + scp.release(); + m_Stat.onAddNode( nHeight ); + m_Stat.onEnsureNew(); + bRet.second = true; + break; + } + } + + dispose_chain( pos ); + + return bRet; + } + + /// Unlinks the item \p val from the set + /** + The function searches the item \p val in the set and unlink it from the set + if it is found and is equal to \p val. + + Difference between \ref erase and \p unlink functions: \p erase finds a key + and deletes the item found. \p unlink finds an item by key and deletes it + only if \p val is an item of that set, i.e. the pointer to item found + is equal to &val . + + RCU \p synchronize method can be called. RCU should not be locked. + + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. + + The function returns \p true if success and \p false otherwise. 
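+
+            A usage sketch (the \p theSet and \p myItem names are illustrative only):
+            \code
+            // myItem was inserted into theSet earlier
+            if ( theSet.unlink( myItem )) {
+                // myItem is no longer in the set; the disposer will be called
+                // asynchronously when it is safe to reclaim the item
+            }
+            \endcode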
+ */ + bool unlink( value_type& val ) + { + check_deadlock_policy::check(); + + position pos; + bool bRet; + + { + rcu_lock rcuLock; + + if ( !find_position( val, pos, key_comparator(), false ) ) { + m_Stat.onUnlinkFailed(); + bRet = false; + } + else { + node_type * pDel = pos.pCur; + assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 ); + + unsigned int nHeight = pDel->height(); + + if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, +# ifdef CDS_CXX11_LAMBDA_SUPPORT + [](value_type const&) {} +# else + empty_erase_functor() +# endif + , false )) + { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onUnlinkSuccess(); + bRet = true; + } + else { + m_Stat.onUnlinkFailed(); + bRet = false; + } + } + } + + dispose_chain( pos ); + + return bRet; + } + + /// Extracts the item from the set with specified \p key + /** \anchor cds_intrusive_SkipListSet_rcu_extract + The function searches an item with key equal to \p key in the set, + unlinks it from the set, places it to \p result parameter, and returns \p true. + If the item with key equal to \p key is not found the function returns \p false. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when \p result object is destroyed or when + result.release() is called, see cds::urcu::exempt_ptr for explanation. + @note Before reusing \p result object you should call its \p release() method. + Example: + \code + typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; + skip_list theList; + // ... + + typename skip_list::exempt_ptr ep; + if ( theList.extract( ep, 5 ) ) { + // Deal with ep + //... + + // Dispose returned item. + ep.release(); + } + \endcode + */ + template + bool extract( exempt_ptr& result, Q const& key ) + { + return do_extract( result, key ); + } + + /// Extracts the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_extract "extract(exempt_ptr&, Q const&)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool extract_with( exempt_ptr& result, Q const& key, Less pred ) + { + return do_extract_with( result, key, pred ); + } + + /// Extracts an item with minimal key from the list + /** + The function searches an item with minimal key, unlinks it, and returns the item found in \p result parameter. + If the skip-list is empty the function returns \p false. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when \p result object is destroyed or when + result.release() is called, see cds::urcu::exempt_ptr for explanation. + @note Before reusing \p result object you should call its \p release() method. + Example: + \code + typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; + skip_list theList; + // ... + + typename skip_list::exempt_ptr ep; + if ( theList.extract_min(ep)) { + // Deal with ep + //... + + // Dispose returned item. 
+ ep.release(); + } + \endcode + + @note Due the concurrent nature of the list, the function extracts nearly minimum key. + It means that the function gets leftmost item and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of list traversing. + */ + bool extract_min( exempt_ptr& result ) + { + return do_extract_min( result ); + } + + /// Extracts an item with maximal key from the list + /** + The function searches an item with maximal key, unlinks it, and returns the item found in \p result parameter. + If the skip-list is empty the function returns \p false. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when \p result object is destroyed or when + result.release() is called, see cds::urcu::exempt_ptr for explanation. + @note Before reusing \p result object you should call its \p release() method. + Example: + \code + typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; + skip_list theList; + // ... + + typename skip_list::exempt_ptr ep; + if ( theList.extract_max(ep) ) { + // Deal with ep + //... + // Dispose returned item. + ep.release(); + } + \endcode + + @note Due the concurrent nature of the list, the function extracts nearly maximal key. + It means that the function gets rightmost item and tries to unlink it. + During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key. + So, the function returns the item with maximum key at the moment of list traversing. + */ + bool extract_max( exempt_ptr& result ) + { + return do_extract_max( result ); + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_SkipListSet_rcu_erase + The function searches an item with key equal to \p val in the set, + unlinks it from the set, and returns \p true. + If the item with key equal to \p val is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool erase( const Q& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return do_erase( val, key_comparator(), [](value_type const&) {} ); +# else + return do_erase( val, key_comparator(), empty_erase_functor() ); +# endif + } + + /// Delete the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( const Q& val, Less pred ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return do_erase( val, cds::opt::details::make_comparator_from_less(), [](value_type const&) {} ); +# else + return do_erase( val, cds::opt::details::make_comparator_from_less(), empty_erase_functor() ); +# endif + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_SkipListSet_rcu_erase_func + The function searches an item with key equal to \p val in the set, + call \p f functor with item found, unlinks it from the set, and returns \p true. 
+ The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. + + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + The functor can be passed by reference with boost:ref + + If the item with key equal to \p val is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool erase( Q const& val, Func f ) + { + return do_erase( val, key_comparator(), f ); + } + + /// Delete the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& val, Less pred, Func f ) + { + return do_erase( val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds the key \p val + /** @anchor cds_intrusive_SkipListSet_rcu_find_func + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + The function applies RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return do_find_with( val, key_comparator(), f ); + } + + /// Finds the key \p val with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)" + but \p cmp is used for key comparison. + \p Less functor has the interface like \p std::less. + \p cmp must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Less pred, Func f ) + { + return do_find_with( val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds the key \p val + /** @anchor cds_intrusive_SkipListSet_rcu_find_cfunc + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. 
+ The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The function applies RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return do_find_with( val, key_comparator(), f ); + } + + /// Finds the key \p val with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_cfunc "find(Q const&, Func)" + but \p cmp is used for key comparison. + \p Less functor has the interface like \p std::less. + \p cmp must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred, Func f ) + { + return do_find_with( val, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds the key \p val + /** @anchor cds_intrusive_SkipListSet_rcu_find_val + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool find( Q const& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return do_find_with( val, key_comparator(), [](value_type& , Q const& ) {} ); +# else + return do_find_with( val, key_comparator(), empty_find_functor() ); +# endif + } + + /// Finds the key \p val with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_val "find(Q const&)" + but \p pred is used for key compare. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred ) + { + return do_find_with( val, cds::opt::details::make_comparator_from_less(), +# ifdef CDS_CXX11_LAMBDA_SUPPORT + [](value_type& , Q const& ) {} +# else + empty_find_functor() +# endif + ); + } + + /// Finds the key \p val and return the item found + /** \anchor cds_intrusive_SkipListSet_rcu_get + The function searches the item with key equal to \p val and returns the pointer to item found. + If \p val is not found it returns \p NULL. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; + skip_list theList; + // ... + { + // Lock RCU + skip_list::rcu_lock lock; + + foo * pVal = theList.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + } + // Unlock RCU by rcu_lock destructor + // pVal can be retired by disposer at any time after RCU has been unlocked + \endcode + + After RCU unlocking the \p %force_dispose member function can be called manually, + see \ref force_dispose for explanation. + */ + template + value_type * get( Q const& val ) + { + assert( gc::is_locked()); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + value_type * pFound; + return do_find_with( val, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; } ) + ? pFound : null_ptr(); +# else + get_functor gf; + return do_find_with( val, key_comparator(), cds::ref(gf) ) + ? 
gf.pFound : null_ptr(); +# endif + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + value_type * get_with( Q const& val, Less pred ) + { + assert( gc::is_locked()); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + value_type * pFound; + return do_find_with( val, cds::opt::details::make_comparator_from_less(), + [&pFound](value_type& found, Q const& ) { pFound = &found; } ) + ? pFound : null_ptr(); +# else + get_functor gf; + return do_find_with( val, cds::opt::details::make_comparator_from_less(), cds::ref(gf) ) + ? gf.pFound : null_ptr(); +# endif + } + + /// Returns item count in the set + /** + The value returned depends on item counter type provided by \p Traits template parameter. + If it is atomicity::empty_item_counter this function always returns 0. + Therefore, the function is not suitable for checking the set emptiness, use \ref empty + member function for this purpose. + */ + size_t size() const + { + return m_ItemCounter; + } + + /// Checks if the set is empty + bool empty() const + { + return m_Head.head()->next(0).load( memory_model::memory_order_relaxed ) == null_ptr(); + } + + /// Clears the set (non-atomic) + /** + The function unlink all items from the set. + The function is not atomic, thus, in multi-threaded environment with parallel insertions + this sequence + \code + set.clear(); + assert( set.empty() ); + \endcode + the assertion could be raised. + + For each item the \ref disposer will be called automatically after unlinking. + */ + void clear() + { + exempt_ptr ep; + while ( extract_min(ep) ) + ep.release(); + } + + /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32. + static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT + { + return c_nMaxHeight; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + /// Clears internal list of ready-to-remove items passing it to RCU reclamation cycle + /** @anchor cds_intrusive_SkipListSet_rcu_force_dispose + Skip list has complex multi-step algorithm for removing an item. In fact, when you + remove the item it is just marked as removed that is enough for the success of your operation. + Actual removing can take place in the future, in another call or even in another thread. + Inside RCU lock the removed item cannot be passed to RCU reclamation cycle + since it can lead to deadlock. To solve this problem, the current skip list implementation + has internal list of items which is ready to remove but is not yet passed to RCU reclamation. + Usually, this list will be passed to RCU reclamation in the next suitable call of skip list member function. + In some cases we want to pass it to RCU reclamation immediately after RCU unlocking. + This function provides such opportunity: it checks whether the RCU is not locked and if it is true + the function passes the internal ready-to-remove list to RCU reclamation cycle. + + The RCU \p synchronize can be called. 
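+
+            A usage sketch (the \p theList variable is illustrative only):
+            \code
+            {
+                // Lock RCU
+                skip_list::rcu_lock lock;
+
+                foo * pVal = theList.get( 5 );
+                // ... deal with pVal ...
+            }
+            // RCU is unlocked here; pass the internal ready-to-remove list
+            // to the RCU reclamation cycle immediately instead of waiting
+            // for the next suitable member function call
+            theList.force_dispose();
+            \endcode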
+ */ + void force_dispose() + { + if ( !gc::is_locked() ) + dispose_deferred(); + } + }; + +}} // namespace cds::intrusive + + +#endif // #ifndef __CDS_INTRUSIVE_SKIP_LIST_RCU_H diff --git a/cds/intrusive/split_list.h b/cds/intrusive/split_list.h new file mode 100644 index 00000000..e72f9f0f --- /dev/null +++ b/cds/intrusive/split_list.h @@ -0,0 +1,1115 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_SPLIT_LIST_H +#define __CDS_INTRUSIVE_SPLIT_LIST_H + +#include + +namespace cds { namespace intrusive { + + /// Split-ordered list + /** @ingroup cds_intrusive_map + \anchor cds_intrusive_SplitListSet_hp + + Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see + - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables" + - [2008] Nir Shavit "The Art of Multiprocessor Programming" + + The split-ordered list is a lock-free implementation of an extensible unbounded hash table. It uses original + recursive split-ordering algorithm discovered by Ori Shalev and Nir Shavit that allows to split buckets + without moving an item on resizing. + + \anchor cds_SplitList_algo_desc + Short description + [from [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables"] + + The algorithm keeps all the items in one lock-free linked list, and gradually assigns the bucket pointers to + the places in the list where a sublist of “correct” items can be found. A bucket is initialized upon first + access by assigning it to a new “dummy” node (dashed contour) in the list, preceding all items that should be + in that bucket. A newly created bucket splits an older bucket’s chain, reducing the access cost to its items. The + table uses a modulo 2**i hash (there are known techniques for “pre-hashing” before a modulo 2**i hash + to overcome possible binary correlations among values). The table starts at size 2 and repeatedly doubles in size. + + Unlike moving an item, the operation of directing a bucket pointer can be done + in a single CAS operation, and since items are not moved, they are never “lost”. + However, to make this approach work, one must be able to keep the items in the + list sorted in such a way that any bucket’s sublist can be “split” by directing a new + bucket pointer within it. This operation must be recursively repeatable, as every + split bucket may be split again and again as the hash table grows. To achieve this + goal the authors introduced recursive split-ordering, a new ordering on keys that keeps items + in a given bucket adjacent in the list throughout the repeated splitting process. + + Magically, yet perhaps not surprisingly, recursive split-ordering is achieved by + simple binary reversal: reversing the bits of the hash key so that the new key’s + most significant bits (MSB) are those that were originally its least significant. + The split-order keys of regular nodes are exactly the bit-reverse image of the original + keys after turning on their MSB. For example, items 9 and 13 are in the 1 mod + 4 bucket, which can be recursively split in two by inserting a new node between + them. + + To insert (respectively delete or search for) an item in the hash table, hash its + key to the appropriate bucket using recursive split-ordering, follow the pointer to + the appropriate location in the sorted items list, and traverse the list until the key’s + proper location in the split-ordering (respectively until the key or a key indicating + the item is not in the list is found). 
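+        For illustration only, the key transformation sketched above can be written as follows
+        (8-bit keys and illustrative helper names are used for readability; the library itself
+        reverses the whole \p size_t with \p cds::bitop::RBO, setting the least significant bit
+        for regular nodes and clearing it for dummy bucket nodes):
+        \code
+        unsigned char reverse8( unsigned char v )
+        {
+            unsigned char r = 0;
+            for ( int i = 0; i < 8; ++i, v >>= 1 )
+                r = (unsigned char)(( r << 1 ) | ( v & 1 ));
+            return r;
+        }
+        unsigned char regular_key( unsigned char h ) { return reverse8( h ) | 1;    }   // item node key
+        unsigned char dummy_key  ( unsigned char b ) { return reverse8( b ) & 0xFE; }   // bucket node key
+
+        // dummy_key(1) == 0x80  <  regular_key(9) == 0x91  <  dummy_key(5) == 0xA0  <  regular_key(13) == 0xB1
+        // So, when the table grows from 4 to 8 buckets, the dummy node of bucket 5 is inserted
+        // between items 9 and 13 and splits the old "1 mod 4" chain without moving any item.
+        \endcode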
Because of the combinatorial structure induced + by the split-ordering, this will require traversal of no more than an expected constant number of items. + + The design is modular: to implement the ordered items list, you can use one of several + non-blocking list-based set algorithms: MichaelList, LazyList. + + Implementation + + Template parameters are: + - \p GC - Garbage collector used. Note the \p GC must be the same as the GC used for \p OrderedList + - \p OrderedList - ordered list implementation used as bucket for hash set, for example, MichaelList, LazyList. + The intrusive ordered list implementation specifies the type \p T stored in the hash-set, the reclamation + schema \p GC used by hash-set, the comparison functor for the type \p T and other features specific for + the ordered list. + - \p Traits - type traits. See split_list::type_traits for explanation. + Instead of defining \p Traits struct you may use option-based syntax with split_list::make_traits metafunction. + + There are several specialization of the split-list class for different \p GC: + - for \ref cds_urcu_gc "RCU type" include - see + \ref cds_intrusive_SplitListSet_rcu "RCU-based split-list" + - for cds::gc::nogc include - see + \ref cds_intrusive_SplitListSet_nogc "persistent SplitListSet". + + \anchor cds_SplitList_hash_functor + Hash functor + + Some member functions of split-ordered list accept the key parameter of type \p Q which differs from \p value_type. + It is expected that type \p Q contains full key of \p value_type, and for equal keys of type \p Q and \p value_type + the hash values of these keys must be equal too. + The hash functor Traits::hash should accept parameters of both type: + \code + // Our node type + struct Foo { + std::string key_ ; // key field + // ... other fields + }; + + // Hash functor + struct fooHash { + size_t operator()( const std::string& s ) const + { + return std::hash( s ); + } + + size_t operator()( const Foo& f ) const + { + return (*this)( f.key_ ); + } + }; + \endcode + + How to use + + First, you should choose ordered list type to use in your split-list set: + \code + // For gc::HP-based MichaelList implementation + #include + + // cds::intrusive::SplitListSet declaration + #include + + // Type of set items + // Note you should declare your struct based on cds::intrusive::split_list::node + // which is a wrapper for ordered-list node struct. + // In our case, the node type for HP-based MichaelList is cds::intrusive::michael_list::node< cds::gc::HP > + struct Foo: public cds::intrusive::split_list::node< cds::intrusive::michael_list::node< cds::gc::HP > > + { + std::string key_ ; // key field + unsigned val_ ; // value field + // ... 
other value fields + }; + + // Declare comparator for the item + struct FooCmp + { + int operator()( const Foo& f1, const Foo& f2 ) const + { + return f1.key_.compare( f2.key_ ); + } + }; + + // Declare base ordered-list type for split-list + // It may be any ordered list type like MichaelList, LazyList + typedef cds::intrusive::MichaelList< cds::gc::HP, Foo, + typename cds::intrusive::michael_list::make_traits< + // hook option + cds::intrusive::opt::hook< cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::HP > > > + // item comparator option + ,cds::opt::compare< FooCmp > + >::type + > Foo_list; + \endcode + + Second, you should declare split-list set container: + \code + + // Declare hash functor + // Note, the hash functor accepts parameter type Foo and std::string + struct FooHash { + size_t operator()( const Foo& f ) const + { + return cds::opt::v::hash()( f.key_ ); + } + size_t operator()( const std::string& s ) const + { + return cds::opt::v::hash()( s ); + } + }; + + // Split-list set typedef + typedef cds::intrusive::SplitListSet< + cds::gc::HP + ,Foo_list + ,typename cds::intrusive::split_list::make_traits< + cds::opt::hash< FooHash > + >::type + > Foo_set; + \endcode + + Now, you can use \p Foo_set in your application. + \code + Foo_set fooSet; + Foo * foo = new Foo; + foo->key_ = "First"; + + fooSet.insert( *foo ); + + // and so on ... + \endcode + + */ + template < + class GC, + class OrderedList, +# ifdef CDS_DOXYGEN_INVOKED + class Traits = split_list::type_traits +# else + class Traits +# endif + > + class SplitListSet + { + public: + typedef Traits options ; ///< Traits template parameters + typedef GC gc ; ///< Garbage collector + + protected: + //@cond + typedef split_list::details::rebind_list_options wrapped_ordered_list; + //@endcond + + public: +# ifdef CDS_DOXYGEN_INVOKED + typedef OrderedList ordered_list ; ///< type of ordered list used as base for split-list +# else + typedef typename wrapped_ordered_list::result ordered_list; +# endif + typedef typename ordered_list::value_type value_type ; ///< type of value stored in the split-list + typedef typename ordered_list::key_comparator key_comparator ; ///< key comparison functor + typedef typename ordered_list::disposer disposer ; ///< Node disposer functor + + /// Hash functor for \p %value_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename options::hash >::type hash; + + typedef typename options::item_counter item_counter ; ///< Item counter type + typedef typename options::back_off back_off ; ///< back-off strategy for spinning + typedef typename options::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename ordered_list::guarded_ptr guarded_ptr; ///< Guarded pointer + + protected: + typedef typename ordered_list::node_type list_node_type ; ///< Node type as declared in ordered list + typedef split_list::node node_type ; ///< split-list node type + typedef node_type dummy_node_type ; ///< dummy node type + + /// Split-list node traits + /** + This traits is intended for converting between underlying ordered list node type \ref list_node_type + and split-list node type \ref node_type + */ + typedef split_list::node_traits node_traits; + + //@cond + /// Bucket table implementation + typedef typename split_list::details::bucket_table_selector< + options::dynamic_bucket_table + , gc + , dummy_node_type + , opt::allocator< typename options::allocator > + , opt::memory_model< memory_model > + >::type bucket_table; + + //@endcond + + protected: + //@cond + /// Ordered list wrapper to access protected members + class ordered_list_wrapper: public ordered_list + { + typedef ordered_list base_class; + typedef typename base_class::auxiliary_head bucket_head_type; + + public: + bool insert_at( dummy_node_type * pHead, value_type& val ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::insert_at( h, val ); + } + + template + bool insert_at( dummy_node_type * pHead, value_type& val, Func f ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::insert_at( h, val, f ); + } + + template + std::pair ensure_at( dummy_node_type * pHead, value_type& val, Func func ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::ensure_at( h, val, func ); + } + + bool unlink_at( dummy_node_type * pHead, value_type& val ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::unlink_at( h, val ); + } + + template + bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp, Func f ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::erase_at( h, val, cmp, f ); + } + + template + bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::erase_at( h, val, cmp ); + } + + template + bool extract_at( dummy_node_type * pHead, typename gc::Guard& guard, split_list::details::search_value_type const& val, Compare cmp ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::extract_at( h, guard, val, cmp ); + } + + template + bool find_at( dummy_node_type * pHead, split_list::details::search_value_type& val, Compare cmp, Func f ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::find_at( h, val, cmp, f ); + } + + template + bool find_at( dummy_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::find_at( h, val, cmp ); + } + + template + bool get_at( dummy_node_type * pHead, typename gc::Guard& guard, split_list::details::search_value_type const& val, Compare cmp ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::get_at( h, guard, val, cmp ); + } + + bool insert_aux_node( dummy_node_type * pNode ) + { + return base_class::insert_aux_node( pNode ); + } + bool insert_aux_node( dummy_node_type * pHead, dummy_node_type * 
pNode ) + { + bucket_head_type h(pHead); + return base_class::insert_aux_node( h, pNode ); + } + }; + //@endcond + + protected: + ordered_list_wrapper m_List ; ///< Ordered list containing split-list items + bucket_table m_Buckets ; ///< bucket table + CDS_ATOMIC::atomic m_nBucketCountLog2 ; ///< log2( current bucket count ) + item_counter m_ItemCounter ; ///< Item counter + hash m_HashFunctor ; ///< Hash functor + + protected: + //@cond + typedef cds::details::Allocator< dummy_node_type, typename options::allocator > dummy_node_allocator; + static dummy_node_type * alloc_dummy_node( size_t nHash ) + { + return dummy_node_allocator().New( nHash ); + } + static void free_dummy_node( dummy_node_type * p ) + { + dummy_node_allocator().Delete( p ); + } + + /// Calculates hash value of \p key + template + size_t hash_value( Q const& key ) const + { + return m_HashFunctor( key ); + } + + size_t bucket_no( size_t nHash ) const + { + return nHash & ( (1 << m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed)) - 1 ); + } + + static size_t parent_bucket( size_t nBucket ) + { + assert( nBucket > 0 ); + return nBucket & ~( 1 << bitop::MSBnz( nBucket ) ); + } + + dummy_node_type * init_bucket( size_t nBucket ) + { + assert( nBucket > 0 ); + size_t nParent = parent_bucket( nBucket ); + + dummy_node_type * pParentBucket = m_Buckets.bucket( nParent ); + if ( pParentBucket == null_ptr() ) { + pParentBucket = init_bucket( nParent ); + } + + assert( pParentBucket != null_ptr() ); + + // Allocate a dummy node for new bucket + { + dummy_node_type * pBucket = alloc_dummy_node( split_list::dummy_hash( nBucket ) ); + if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) { + m_Buckets.bucket( nBucket, pBucket ); + return pBucket; + } + free_dummy_node( pBucket ); + } + + // Another thread set the bucket. Wait while it done + + // In this point, we must wait while nBucket is empty. + // The compiler can decide that waiting loop can be "optimized" (stripped) + // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer. 
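+            // Note: this point is reached only if another thread has won the insert_aux_node()
+            // race above; that thread publishes the new bucket head via m_Buckets.bucket(),
+            // so the spin below is expected to be short.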
+ // + back_off bkoff; + while ( true ) { + dummy_node_type volatile * p = m_Buckets.bucket( nBucket ); + if ( p != null_ptr() ) + return const_cast( p ); + bkoff(); + } + } + + dummy_node_type * get_bucket( size_t nHash ) + { + size_t nBucket = bucket_no( nHash ); + + dummy_node_type * pHead = m_Buckets.bucket( nBucket ); + if ( pHead == null_ptr() ) + pHead = init_bucket( nBucket ); + + assert( pHead->is_dummy() ); + + return pHead; + } + + void init() + { + // GC and OrderedList::gc must be the same + static_assert(( std::is_same::value ), "GC and OrderedList::gc must be the same"); + + // atomicity::empty_item_counter is not allowed as a item counter + static_assert(( !std::is_same::value ), "atomicity::empty_item_counter is not allowed as a item counter"); + + // Initialize bucket 0 + dummy_node_type * pNode = alloc_dummy_node( 0 /*split_list::dummy_hash(0)*/ ); + + // insert_aux_node cannot return false for empty list + CDS_VERIFY( m_List.insert_aux_node( pNode )); + + m_Buckets.bucket( 0, pNode ); + } + + void inc_item_count() + { + size_t sz = m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed); + if ( ( ++m_ItemCounter >> sz ) > m_Buckets.load_factor() && ((size_t)(1 << sz )) < m_Buckets.capacity() ) + { + m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ); + } + } + + template + bool find_( Q& val, Compare cmp, Func f ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return m_List.find_at( pHead, sv, cmp, + [&f](value_type& item, split_list::details::search_value_type& val){ cds::unref(f)(item, val.val ); }); +# else + split_list::details::find_functor_wrapper ffw( f ); + return m_List.find_at( pHead, sv, cmp, cds::ref(ffw) ); +# endif + } + + template + bool find_( Q const& val, Compare cmp ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + return m_List.find_at( pHead, sv, cmp ); + } + + template + bool get_( typename gc::Guard& guard, Q const& val, Compare cmp ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + return m_List.get_at( pHead, guard, sv, cmp ); + } + + template + bool get_( typename gc::Guard& guard, Q const& key) + { + return get_( guard, key, key_comparator()); + } + + template + bool get_with_( typename gc::Guard& guard, Q const& key, Less ) + { + return get_( guard, key, typename wrapped_ordered_list::template make_compare_from_less()); + } + + template + bool erase_( Q const& val, Compare cmp, Func f ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + if ( m_List.erase_at( pHead, sv, cmp, f )) { + --m_ItemCounter; + return true; + } + return false; + } + + template + bool erase_( Q const& val, Compare cmp ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() 
); + + if ( m_List.erase_at( pHead, sv, cmp ) ) { + --m_ItemCounter; + return true; + } + return false; + } + + template + bool extract_( typename gc::Guard& guard, Q const& val, Compare cmp ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + if ( m_List.extract_at( pHead, guard, sv, cmp ) ) { + --m_ItemCounter; + return true; + } + return false; + } + + template + bool extract_( typename gc::Guard& guard, Q const& key) + { + return extract_( guard, key, key_comparator()); + } + + template + bool extract_with_( typename gc::Guard& guard, Q const& key, Less ) + { + return extract_( guard, key, typename wrapped_ordered_list::template make_compare_from_less() ); + } + + //@endcond + + public: + /// Initialize split-ordered list of default capacity + /** + The default capacity is defined in bucket table constructor. + See split_list::expandable_bucket_table, split_list::static_ducket_table + which selects by split_list::dynamic_bucket_table option. + */ + SplitListSet() + : m_nBucketCountLog2(1) + { + init(); + } + + /// Initialize split-ordered list + SplitListSet( + size_t nItemCount ///< estimate average of item count + , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 8, default is 1. + ) + : m_Buckets( nItemCount, nLoadFactor ) + , m_nBucketCountLog2(1) + { + init(); + } + + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + size_t nHash = hash_value( val ); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); + + if ( m_List.insert_at( pHead, val )) { + inc_item_count(); + return true; + } + return false; + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success and may be passed by reference + using boost::ref + */ + template + bool insert( value_type& val, Func f ) + { + size_t nHash = hash_value( val ); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); + + if ( m_List.insert_at( pHead, val, f )) { + inc_item_count(); + return true; + } + return false; + } + + /// Ensures that the \p val exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted into the set. + Otherwise, the functor \p func is called with item found. 
+ The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refers to the same thing. + + The functor can change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You can pass \p func argument by value or by reference using boost::ref or cds::ref. + + Returns std::pair where \p first is \p true if operation is successfull, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the set. + */ + template + std::pair ensure( value_type& val, Func func ) + { + size_t nHash = hash_value( val ); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); + + std::pair bRet = m_List.ensure_at( pHead, val, func ); + if ( bRet.first && bRet.second ) + inc_item_count(); + return bRet; + } + + /// Unlinks the item \p val from the set + /** + The function searches the item \p val in the set and unlinks it from the set + if it is found and is equal to \p val. + + Difference between \ref erase and \p unlink functions: \p erase finds a key + and deletes the item found. \p unlink finds an item by key and deletes it + only if \p val is an item of that set, i.e. the pointer to item found + is equal to &val . + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + size_t nHash = hash_value( val ); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + if ( m_List.unlink_at( pHead, val ) ) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_SplitListSet_hp_erase + The function searches an item with key equal to \p val in the set, + unlinks it from the set, and returns \p true. + If the item with key equal to \p val is not found the function return \p false. + + Difference between \ref erase and \p unlink functions: \p erase finds a key + and deletes the item found. \p unlink finds an item by key and deletes it + only if \p val is an item of that set, i.e. the pointer to item found + is equal to &val . + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool erase( Q const& val ) + { + return erase_( val, key_comparator() ); + } + + /// Deletes the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SplitListSet_hp_erase "erase(Q const&)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( const Q& val, Less pred ) + { + return erase_( val, typename wrapped_ordered_list::template make_compare_from_less() ); + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_SplitListSet_hp_erase_func + The function searches an item with key equal to \p val in the set, + call \p f functor with item found, unlinks it from the set, and returns \p true. 
+ The \ref disposer specified by \p OrderedList class template parameter is called + by garbage collector \p GC asynchronously. + + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + The functor can be passed by reference with boost:ref + + If the item with key equal to \p val is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool erase( Q const& val, Func f ) + { + return erase_( val, key_comparator(), f ); + } + + /// Deletes the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SplitListSet_hp_erase_func "erase(Q const&, Func)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& val, Less pred, Func f ) + { + return erase_( val, typename wrapped_ordered_list::template make_compare_from_less(), f ); + } + + /// Extracts the item with specified \p key + /** \anchor cds_intrusive_SplitListSet_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the set, and returns it in \p dest parameter. + If the item with key equal to \p key is not found the function returns \p false. + + Note the compare functor should accept a parameter of type \p Q that may be not the same as \p value_type. + + The \ref disposer specified in \p OrderedList class' template parameter is called automatically + by garbage collector \p GC when returned \ref guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::intrusive::SplitListSet< your_template_args > splitlist_set; + splitlist_set theSet; + // ... + { + splitlist_set::guarded_ptr gp; + theSet.extract( gp, 5 ); + // Deal with gp + // ... + + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + bool extract( guarded_ptr& dest, Q const& key ) + { + return extract_( dest.guard(), key ); + } + + /// Extracts the item using compare functor \p pred + /** + The function is an analog of \ref cds_intrusive_SplitListSet_hp_extract "extract(guarded_ptr&, Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool extract_with( guarded_ptr& dest, Q const& key, Less pred ) + { + return extract_with_( dest.guard(), key, pred ); + } + + + /// Finds the key \p val + /** \anchor cds_intrusive_SplitListSet_hp_find_func + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. 
If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return find_( val, key_comparator(), f ); + } + + /// Finds the key \p val with \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SplitListSet_hp_find_func "find(Q&, Func)" + but \p cmp is used for key compare. + \p Less has the interface like \p std::less. + \p cmp must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Less pred, Func f ) + { + return find_( val, typename wrapped_ordered_list::template make_compare_from_less(), f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_SplitListSet_hp_find_cfunc + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return find_( val, key_comparator(), f ); + } + + /// Finds the key \p val with \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SplitListSet_hp_find_cfunc "find(Q const&, Func)" + but \p cmp is used for key compare. + \p Less has the interface like \p std::less. + \p cmp must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred, Func f ) + { + return find_( val, typename wrapped_ordered_list::template make_compare_from_less(), f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_SplitListSet_hp_find_val + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + Otherwise, you may use \p find_with functions with explicit predicate for key comparing. + */ + template + bool find( Q const& val ) + { + return find_( val, key_comparator() ); + } + + /// Finds the key \p val with \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SplitListSet_hp_find_val "find(Q const&)" + but \p cmp is used for key compare. 
+ \p Less has the interface like \p std::less. + \p cmp must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred ) + { + return find_( val, typename wrapped_ordered_list::template make_compare_from_less() ); + } + + /// Finds the key \p val and return the item found + /** \anchor cds_intrusive_SplitListSet_hp_get + The function searches the item with key equal to \p val + and assigns the item found to guarded pointer \p ptr. + The function returns \p true if \p val is found, and \p false otherwise. + If \p val is not found the \p ptr parameter is not changed. + + The \ref disposer specified in \p OrderedList class' template parameter is called + by garbage collector \p GC automatically when returned \ref guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::intrusive::SplitListSet< your_template_params > splitlist_set; + splitlist_set theSet; + // ... + { + splitlist_set::guarded_ptr gp; + if ( theSet.get( gp, 5 )) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for \p OrderedList template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool get( guarded_ptr& ptr, Q const& val ) + { + return get_( ptr.guard(), val ); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_intrusive_SplitListSet_hp_get "get( guarded_ptr& ptr, Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool get_with( guarded_ptr& ptr, Q const& val, Less pred ) + { + return get_with_( ptr.guard(), val, pred ); + } + + /// Returns item count in the set + size_t size() const + { + return m_ItemCounter; + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, the correct item counting feature is an important part of split-list set implementation. + */ + bool empty() const + { + return size() == 0; + } + + /// Clears the set (non-atomic) + /** + The function unlink all items from the set. + The function is not atomic. Therefore, \p clear may be used only for debugging purposes. + + For each item the \p disposer is called after unlinking. + */ + void clear() + { + iterator it = begin(); + while ( it != end() ) { + iterator i(it); + ++i; + unlink( *it ); + it = i; + } + } + + protected: + //@cond + template + class iterator_type + :public split_list::details::iterator_type + { + typedef split_list::details::iterator_type iterator_base_class; + typedef typename iterator_base_class::list_iterator list_iterator; + public: + iterator_type() + : iterator_base_class() + {} + + iterator_type( iterator_type const& src ) + : iterator_base_class( src ) + {} + + // This ctor should be protected... 
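+            // (it is kept public because SplitListSet::begin()/end() construct iterators
+            //  directly from the underlying ordered-list iterators)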
+ iterator_type( list_iterator itCur, list_iterator itEnd ) + : iterator_base_class( itCur, itEnd ) + {} + }; + //@endcond + public: + /// Forward iterator + /** + The forward iterator for a split-list has some features: + - it has no post-increment operator + - it depends on iterator of underlying \p OrderedList + - The iterator cannot be moved across thread boundary since it may contain GC's guard that is thread-private GC data. + - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the split-list. + + Therefore, the use of iterators in concurrent environment is not good idea. Use the iterator on the concurrent container + for debug purpose only. + */ + typedef iterator_type iterator; + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a split-list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_List.begin(), m_List.end() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a split-list + /** + Do not use the value returned by end function to access any item. + + The returned value can be used only to control reaching the end of the split-list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_List.end(), m_List.end() ); + } + + /// Returns a forward const iterator addressing the first element in a split-list + const_iterator begin() const + { + return const_iterator( m_List.begin(), m_List.end() ); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a split-list + const_iterator end() const + { + return const_iterator( m_List.end(), m_List.end() ); + } + + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_SPLIT_LIST_H diff --git a/cds/intrusive/split_list_base.h b/cds/intrusive/split_list_base.h new file mode 100644 index 00000000..98609d0d --- /dev/null +++ b/cds/intrusive/split_list_base.h @@ -0,0 +1,824 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_SPLIT_LIST_BASE_H +#define __CDS_INTRUSIVE_SPLIT_LIST_BASE_H + +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// Split-ordered list related definitions + /** @ingroup cds_intrusive_helper + */ + namespace split_list { + /// Split-ordered list node + /** + Template parameter: + - OrderedListNode - node type for underlying ordered list + */ + template + struct node: public OrderedListNode + { + //@cond + typedef OrderedListNode base_class; + //@endcond + + size_t m_nHash ; ///< Hash value for node + + /// Default constructor + node() + : m_nHash(0) + { + assert( is_dummy() ); + } + + /// Initializes dummy node with \p nHash value + node( size_t nHash ) + : m_nHash( nHash ) + { + assert( is_dummy() ); + } + + /// Checks if the node is dummy node + bool is_dummy() const + { + return (m_nHash & 1) == 0; + } + }; + + + /// Type traits for SplitListSet class + struct type_traits { + /// Hash function + /** + Hash function converts the key fields of struct \p T stored in the split list + into value of type \p size_t called hash value that is an index of hash table. + + This is mandatory type and has no predefined one. 
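+                See the \ref cds_SplitList_hash_functor "Hash functor" section of SplitListSet
+                class description for the requirements to the hash functor and an example.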
+ */ + typedef opt::none hash; + + /// Item counter + /** + The item counting is an important part of SplitListSet algorithm: + the empty() member function depends on correct item counting. + Therefore, atomicity::empty_item_counter is not allowed as a type of the option. + + Default is atomicity::item_counter. + */ + typedef atomicity::item_counter item_counter; + + /// Bucket table allocator + /** + Allocator for bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR + */ + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// C++ memory model for atomic operations + /** + Can be opt::v::relaxed_ordering (relaxed memory model, the default) or opt::v::sequential_consistent (sequentially consisnent memory model). + */ + typedef opt::v::relaxed_ordering memory_model; + + /// What type of bucket table is used + /** + \p true - use split_list::expandable_bucket_table that can be expanded + if the load factor of the set is exhausted + \p false - use split_list::static_bucket_table that cannot be expanded + + Default is \p true. + */ + static const bool dynamic_bucket_table = true; + + /// back-off strategy used + /** + If the option is not specified, the cds::backoff::Default is used. + */ + typedef cds::backoff::Default back_off; + }; + + /// [value-option] Split-list dynamic bucket table option + /** + The option is used to select bucket table implementation. + Possible values of \p Value are: + - \p true - select \ref expandable_bucket_table implementation + - \p false - select \ref static_bucket_table implementation + */ + template + struct dynamic_bucket_table + { + //@cond + template struct pack: public Base + { + enum { dynamic_bucket_table = Value }; + }; + //@endcond + }; + + /// Metafunction converting option list to traits struct + /** + This is a wrapper for cds::opt::make_options< type_traits, Options...> + + Available \p Options: + - opt::hash - mandatory option, specifies hash functor. + - opt::item_counter - optional, specifies item counting policy. See type_traits::item_counter + for default type. + - opt::memory_model - C++ memory model for atomic operations. + Can be opt::v::relaxed_ordering (relaxed memory model, the default) or opt::v::sequential_consistent (sequentially consisnent memory model). + - opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR. + - split_list::dynamic_bucket_table - use dynamic or static bucket table implementation. + Dynamic bucket table expands its size up to maximum bucket count when necessary + - opt::back_off - back-off strategy used for spinning. If the option is not specified, the cds::backoff::Default is used. + + See \ref MichaelHashSet, \ref type_traits. + */ + template + struct make_traits { + typedef typename cds::opt::make_options< type_traits, CDS_OPTIONS6>::type type ; ///< Result of metafunction + }; + + + /// Static bucket table + /** + Non-resizeable bucket table for SplitListSet class. + The capacity of table (max bucket count) is defined in the constructor call. + + Template parameter: + - \p GC - garbage collector used + - \p Node - node type, must be a type based on\ref node template + - \p Options... - options + + \p Options are: + - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::memory_model - memory model used. 
Possible types are opt::v::sequential_consistent, opt::v::relaxed_ordering + */ + template + class static_bucket_table + { + //@cond + struct default_options + { + typedef CDS_DEFAULT_ALLOCATOR allocator; + typedef opt::v::relaxed_ordering memory_model; + }; + typedef typename opt::make_options< default_options, CDS_OPTIONS2 >::type options; + //@endcond + + public: + typedef GC gc ; ///< Garbage collector + typedef Node node_type ; ///< Bucket node type + typedef CDS_ATOMIC::atomic table_entry ; ///< Table entry type + + /// Bucket table allocator + typedef cds::details::Allocator< table_entry, typename options::allocator > bucket_table_allocator; + + /// Memory model for atomic operations + typedef typename options::memory_model memory_model; + + protected: + const size_t m_nLoadFactor ; ///< load factor (average count of items per bucket) + const size_t m_nCapacity ; ///< Bucket table capacity + table_entry * m_Table ; ///< Bucket table + + protected: + //@cond + void allocate_table() + { + m_Table = bucket_table_allocator().NewArray( m_nCapacity, null_ptr() ); + } + + void destroy_table() + { + bucket_table_allocator().Delete( m_Table, m_nCapacity ); + } + //@endcond + + public: + /// Constructs bucket table for 512K buckets. Load factor is 1. + static_bucket_table() + : m_nLoadFactor(1) + , m_nCapacity( 512 * 1024 ) + { + allocate_table(); + } + + /// Constructs + static_bucket_table( + size_t nItemCount, ///< Max expected item count in split-ordered list + size_t nLoadFactor ///< Load factor + ) + : m_nLoadFactor( nLoadFactor > 0 ? nLoadFactor : (size_t) 1 ), + m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor ) ) + { + // m_nCapacity must be power of 2 + assert( cds::beans::is_power2( m_nCapacity ) ); + allocate_table(); + } + + /// Destroy bucket table + ~static_bucket_table() + { + destroy_table(); + } + + /// Returns head node of bucket \p nBucket + node_type * bucket( size_t nBucket ) const + { + assert( nBucket < capacity() ); + return m_Table[ nBucket ].load(memory_model::memory_order_acquire); + } + + /// Set head node \p pNode of bucket \p nBucket + void bucket( size_t nBucket, node_type * pNode ) + { + assert( nBucket < capacity() ); + assert( bucket(nBucket) == null_ptr() ); + + m_Table[ nBucket ].store( pNode, memory_model::memory_order_release ); + } + + /// Returns the capacity of the bucket table + size_t capacity() const + { + return m_nCapacity; + } + + /// Returns the load factor, i.e. average count of items per bucket + size_t load_factor() const + { + return m_nLoadFactor; + } + }; + + /// Expandable bucket table + /** + This bucket table can dynamically grow its capacity when necessary + up to maximum bucket count. + + Template parameter: + - \p GC - garbage collector used + - \p Node - node type, must be an instantiation of \ref node template + - \p Options... - options + + \p Options are: + - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::memory_model - memory model used. 
Possible types are opt::v::sequential_consistent, opt::v::relaxed_ordering + */ + template + class expandable_bucket_table + { + //@cond + struct default_options + { + typedef CDS_DEFAULT_ALLOCATOR allocator; + typedef opt::v::relaxed_ordering memory_model; + }; + typedef typename opt::make_options< default_options, CDS_OPTIONS2 >::type options; + //@endcond + public: + typedef GC gc ; ///< Garbage collector + typedef Node node_type ; ///< Bucket node type + typedef CDS_ATOMIC::atomic table_entry ; ///< Table entry type + + /// Memory model for atomic operations + typedef typename options::memory_model memory_model; + + protected: + typedef CDS_ATOMIC::atomic segment_type ; ///< Bucket table segment type + + public: + /// Bucket table allocator + typedef cds::details::Allocator< segment_type, typename options::allocator > bucket_table_allocator; + + /// Bucket table segment allocator + typedef cds::details::Allocator< table_entry, typename options::allocator > segment_allocator; + + protected: + /// Bucket table metrics + struct metrics { + size_t nSegmentCount ; ///< max count of segments in bucket table + size_t nSegmentSize ; ///< the segment's capacity. The capacity must be power of two. + size_t nSegmentSizeLog2 ; ///< log2( m_nSegmentSize ) + size_t nLoadFactor ; ///< load factor + size_t nCapacity ; ///< max capacity of bucket table + + metrics() + : nSegmentCount(1024) + , nSegmentSize(512) + , nSegmentSizeLog2( cds::beans::log2( nSegmentSize ) ) + , nLoadFactor(1) + , nCapacity( nSegmentCount * nSegmentSize ) + {} + }; + + const metrics m_metrics ; ///< Dynamic bucket table metrics + + protected: + //const size_t m_nLoadFactor; ///< load factor (average count of items per bucket) + //const size_t m_nCapacity ; ///< Bucket table capacity + segment_type * m_Segments ; ///< bucket table - array of segments + + protected: + //@cond + metrics calc_metrics( size_t nItemCount, size_t nLoadFactor ) + { + metrics m; + + // Calculate m_nSegmentSize and m_nSegmentCount by nItemCount + m.nLoadFactor = nLoadFactor > 0 ? 
nLoadFactor : 1; + + size_t nBucketCount = (size_t)( ((float) nItemCount) / m.nLoadFactor ); + if ( nBucketCount <= 2 ) { + m.nSegmentCount = 1; + m.nSegmentSize = 2; + } + else if ( nBucketCount <= 1024 ) { + m.nSegmentCount = 1; + m.nSegmentSize = ((size_t) 1) << beans::log2ceil( nBucketCount ); + } + else { + nBucketCount = beans::log2ceil( nBucketCount ); + m.nSegmentCount = + m.nSegmentSize = ((size_t) 1) << ( nBucketCount / 2 ); + if ( nBucketCount & 1 ) + m.nSegmentSize *= 2; + if ( m.nSegmentCount * m.nSegmentSize * m.nLoadFactor < nItemCount ) + m.nSegmentSize *= 2; + } + m.nCapacity = m.nSegmentCount * m.nSegmentSize; + m.nSegmentSizeLog2 = cds::beans::log2( m.nSegmentSize ); + assert( m.nSegmentSizeLog2 != 0 ) ; // + return m; + } + + segment_type * allocate_table() + { + return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, null_ptr() ); + } + + void destroy_table( segment_type * pTable ) + { + bucket_table_allocator().Delete( pTable, m_metrics.nSegmentCount ); + } + + table_entry * allocate_segment() + { + return segment_allocator().NewArray( m_metrics.nSegmentSize, null_ptr() ); + } + + void destroy_segment( table_entry * pSegment ) + { + segment_allocator().Delete( pSegment, m_metrics.nSegmentSize ); + } + + void init_segments() + { + // m_nSegmentSize must be 2**N + assert( cds::beans::is_power2( m_metrics.nSegmentSize )); + assert( ( ((size_t) 1) << m_metrics.nSegmentSizeLog2) == m_metrics.nSegmentSize ); + + // m_nSegmentCount must be 2**K + assert( cds::beans::is_power2( m_metrics.nSegmentCount )); + + m_Segments = allocate_table(); + } + + //@endcond + + public: + /// Constructs bucket table for 512K buckets. Load factor is 1. + expandable_bucket_table() + : m_metrics( calc_metrics( 512 * 1024, 1 )) + { + init_segments(); + } + + /// Constructs + expandable_bucket_table( + size_t nItemCount, ///< Max expected item count in split-ordered list + size_t nLoadFactor ///< Load factor + ) + : m_metrics( calc_metrics( nItemCount, nLoadFactor )) + { + init_segments(); + } + + /// Destroy bucket table + ~expandable_bucket_table() + { + segment_type * pSegments = m_Segments; + for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) { + table_entry * pEntry = pSegments[i].load(memory_model::memory_order_relaxed); + if ( pEntry != null_ptr() ) + destroy_segment( pEntry ); + } + destroy_table( pSegments ); + } + + /// Returns head node of the bucket \p nBucket + node_type * bucket( size_t nBucket ) const + { + size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2; + assert( nSegment < m_metrics.nSegmentCount ); + + table_entry * pSegment = m_Segments[ nSegment ].load(memory_model::memory_order_acquire); + if ( pSegment == null_ptr() ) + return null_ptr() ; // uninitialized bucket + return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load(memory_model::memory_order_acquire); + } + + /// Set head node \p pNode of bucket \p nBucket + void bucket( size_t nBucket, node_type * pNode ) + { + size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2; + assert( nSegment < m_metrics.nSegmentCount ); + + segment_type& segment = m_Segments[nSegment]; + if ( segment.load(memory_model::memory_order_relaxed) == null_ptr() ) { + table_entry * pNewSegment = allocate_segment(); + table_entry * pNull = null_ptr(); + if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { + destroy_segment( pNewSegment ); + } + } + segment.load(memory_model::memory_order_acquire)[ nBucket & (m_metrics.nSegmentSize - 1) 
].store( pNode, memory_model::memory_order_release ); + } + + /// Returns the capacity of the bucket table + size_t capacity() const + { + return m_metrics.nCapacity; + } + + /// Returns the load factor, i.e. average count of items per bucket + size_t load_factor() const + { + return m_metrics.nLoadFactor; + } + }; + + /// Split-list node traits + /** + This traits is intended for converting between underlying ordered list node type + and split-list node type + + Template parameter: + - \p BaseNodeTraits - node traits of base ordered list type + */ + template + struct node_traits: private BaseNodeTraits + { + typedef BaseNodeTraits base_class ; ///< Base ordered list type + typedef typename base_class::value_type value_type ; ///< Value type + typedef typename base_class::node_type base_node_type ; ///< Ordered list node type + typedef node node_type ; ///< Spit-list node type + + /// Convert value reference to node pointer + static node_type * to_node_ptr( value_type& v ) + { + return static_cast( base_class::to_node_ptr( v ) ); + } + + /// Convert value pointer to node pointer + static node_type * to_node_ptr( value_type * v ) + { + return static_cast( base_class::to_node_ptr( v ) ); + } + + /// Convert value reference to node pointer (const version) + static node_type const * to_node_ptr( value_type const& v ) + { + return static_cast( base_class::to_node_ptr( v ) ); + } + + /// Convert value pointer to node pointer (const version) + static node_type const * to_node_ptr( value_type const * v ) + { + return static_cast( base_class::to_node_ptr( v ) ); + } + + /// Convert node refernce to value pointer + static value_type * to_value_ptr( node_type& n ) + { + return base_class::to_value_ptr( static_cast( n ) ); + } + + /// Convert node pointer to value pointer + static value_type * to_value_ptr( node_type * n ) + { + return base_class::to_value_ptr( static_cast( n ) ); + } + + /// Convert node reference to value pointer (const version) + static const value_type * to_value_ptr( node_type const & n ) + { + return base_class::to_value_ptr( static_cast( n ) ); + } + + /// Convert node pointer to value pointer (const version) + static const value_type * to_value_ptr( node_type const * n ) + { + return base_class::to_value_ptr( static_cast( n ) ); + } + }; + + + //@cond + namespace details { + template + struct bucket_table_selector; + + template + struct bucket_table_selector< true, GC, Node, CDS_OPTIONS2> + { + typedef expandable_bucket_table type; + }; + + template + struct bucket_table_selector< false, GC, Node, CDS_OPTIONS2> + { + typedef static_bucket_table type; + }; + + template + struct dummy_node_disposer { + template + void operator()( Node * p ) + { + typedef cds::details::Allocator< Node, Alloc > node_deallocator; + node_deallocator().Delete( p ); + } + }; + + template + struct search_value_type + { + Q& val; + size_t nHash; + + search_value_type( Q& v, size_t h ) + : val( v ) + , nHash( h ) + {} + /* + operator Q&() const + { + return val; + } + */ + }; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + template + class find_functor_wrapper: protected cds::details::functor_wrapper + { + typedef cds::details::functor_wrapper base_class; + public: + find_functor_wrapper( Func f ) + : base_class( f ) + {} + + template + void operator()( ValueType& item, split_list::details::search_value_type& val ) + { + base_class::get()( item, val.val ); + } + }; +# endif + + template + class rebind_list_options + { + typedef OrderedList native_ordered_list; + typedef Options options; + + typedef typename 
native_ordered_list::gc gc; + typedef typename native_ordered_list::key_comparator native_key_comparator; + typedef typename native_ordered_list::node_type node_type; + typedef typename native_ordered_list::value_type value_type; + typedef typename native_ordered_list::node_traits node_traits; + typedef typename native_ordered_list::disposer native_disposer; + + typedef split_list::node splitlist_node_type; + + struct key_compare { + int operator()( value_type const& v1, value_type const& v2 ) const + { + splitlist_node_type const * n1 = static_cast( node_traits::to_node_ptr( v1 )); + splitlist_node_type const * n2 = static_cast( node_traits::to_node_ptr( v2 )); + if ( n1->m_nHash != n2->m_nHash ) + return n1->m_nHash < n2->m_nHash ? -1 : 1; + + if ( n1->is_dummy() ) { + assert( n2->is_dummy() ); + return 0; + } + + assert( !n1->is_dummy() && !n2->is_dummy() ); + + return native_key_comparator()( v1, v2 ); + } + + template + int operator()( value_type const& v, search_value_type const& q ) const + { + splitlist_node_type const * n = static_cast( node_traits::to_node_ptr( v )); + if ( n->m_nHash != q.nHash ) + return n->m_nHash < q.nHash ? -1 : 1; + + assert( !n->is_dummy() ); + return native_key_comparator()( v, q.val ); + } + + template + int operator()( search_value_type const& q, value_type const& v ) const + { + return -operator()( v, q ); + } + }; + + struct wrapped_disposer + { + void operator()( value_type * v ) + { + splitlist_node_type * p = static_cast( node_traits::to_node_ptr( v )); + if ( p->is_dummy() ) + dummy_node_disposer()( p ); + else + native_disposer()( v ); + } + }; + + public: + template + struct make_compare_from_less: public cds::opt::details::make_comparator_from_less + { + typedef cds::opt::details::make_comparator_from_less base_class; + + template + int operator()( value_type const& v, search_value_type const& q ) const + { + splitlist_node_type const * n = static_cast( node_traits::to_node_ptr( v )); + if ( n->m_nHash != q.nHash ) + return n->m_nHash < q.nHash ? -1 : 1; + + assert( !n->is_dummy() ); + return base_class()( v, q.val ); + } + + template + int operator()( search_value_type const& q, value_type const& v ) const + { + splitlist_node_type const * n = static_cast( node_traits::to_node_ptr( v )); + if ( n->m_nHash != q.nHash ) + return q.nHash < n->m_nHash ? -1 : 1; + + assert( !n->is_dummy() ); + return base_class()( q.val, v ); + } + + template + int operator()( Q1 const& v1, Q2 const& v2 ) const + { + return base_class()( v1, v2 ); + } + }; + + typedef typename native_ordered_list::template rebind_options< + opt::compare< key_compare > + ,opt::disposer< wrapped_disposer > + ,opt::boundary_node_type< splitlist_node_type > + >::type result; + }; + + template + struct select_list_iterator; + + template + struct select_list_iterator + { + typedef typename OrderedList::iterator type; + }; + + template + struct select_list_iterator + { + typedef typename OrderedList::const_iterator type; + }; + + template + class iterator_type + { + typedef OrderedList ordered_list_type; + protected: + typedef typename select_list_iterator::type list_iterator; + typedef NodeTraits node_traits; + + private: + list_iterator m_itCur; + list_iterator m_itEnd; + + public: + typedef typename list_iterator::value_ptr value_ptr; + typedef typename list_iterator::value_ref value_ref; + + public: + iterator_type() + {} + + iterator_type( iterator_type const& src ) + : m_itCur( src.m_itCur ) + , m_itEnd( src.m_itEnd ) + {} + + // This ctor should be protected... 
+ iterator_type( list_iterator itCur, list_iterator itEnd ) + : m_itCur( itCur ) + , m_itEnd( itEnd ) + { + // skip dummy nodes + while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy() ) + ++m_itCur; + } + + + value_ptr operator ->() const + { + return m_itCur.operator->(); + } + + value_ref operator *() const + { + return m_itCur.operator*(); + } + + /// Pre-increment + iterator_type& operator ++() + { + if ( m_itCur != m_itEnd ) { + do { + ++m_itCur; + } while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy() ); + } + return *this; + } + + iterator_type& operator = (iterator_type const& src) + { + m_itCur = src.m_itCur; + m_itEnd = src.m_itEnd; + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return m_itCur == i.m_itCur; + } + template + bool operator !=(iterator_type const& i ) const + { + return m_itCur != i.m_itCur; + } + }; + + + } // namespace details + //@endcond + + //@cond + // Helper functions + + /// Reverses bit order in \p nHash + static inline size_t reverse_bits( size_t nHash ) + { + return bitop::RBO( nHash ); + } + + static inline size_t regular_hash( size_t nHash ) + { + return reverse_bits( nHash ) | size_t(1); + } + + static inline size_t dummy_hash( size_t nHash ) + { + return reverse_bits( nHash ) & ~size_t(1); + } + //@endcond + + } // namespace split_list + + //@cond + // Forward declaration + template + class SplitListSet; + //@endcond + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_SPLIT_LIST_BASE_H diff --git a/cds/intrusive/split_list_nogc.h b/cds/intrusive/split_list_nogc.h new file mode 100644 index 00000000..a5b54570 --- /dev/null +++ b/cds/intrusive/split_list_nogc.h @@ -0,0 +1,624 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_SPLIT_LIST_NOGC_H +#define __CDS_INTRUSIVE_SPLIT_LIST_NOGC_H + +#include +#include + +namespace cds { namespace intrusive { + + /// Split-ordered list (template specialization for gc::nogc) + /** @ingroup cds_intrusive_map + \anchor cds_intrusive_SplitListSet_nogc + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of list item. + + See \ref cds_intrusive_SplitListSet_hp "SplitListSet" for description of template parameters. + The template parameter \p OrderedList should be any gc::nogc-derived ordered list, for example, + \ref cds_intrusive_MichaelList_nogc "persistent MichaelList", + \ref cds_intrusive_LazyList_nogc "persistent LazyList" + + The interface of the specialization is a slightly different. 
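+
+        As a minimal sketch (the item type \p Foo, its hook and traits, and the header paths
+        shown are illustrative assumptions, not defined in this file), a persistent split-list
+        based on \p MichaelList could be declared as follows:
+        \code
+        #include <cds/intrusive/michael_list_nogc.h>
+        #include <cds/intrusive/split_list_nogc.h>
+
+        // Foo must provide the michael_list hook; hook and key-comparing options are omitted here
+        typedef cds::intrusive::MichaelList< cds::gc::nogc, Foo > nogc_michael_list;
+
+        // Split-list set based on the persistent Michael's list
+        typedef cds::intrusive::SplitListSet< cds::gc::nogc, nogc_michael_list > nogc_split_list;
+        \endcode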
+ */ + template < + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = split_list::type_traits +#else + class Traits +#endif + > + class SplitListSet< gc::nogc, OrderedList, Traits > + { + public: + typedef Traits options ; ///< Traits template parameters + typedef gc::nogc gc ; ///< Garbage collector + + /// Hash functor for \ref value_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename options::hash >::type hash; + + protected: + //@cond + typedef split_list::details::rebind_list_options wrapped_ordered_list; + //@endcond + + public: +# ifdef CDS_DOXYGEN_INVOKED + typedef OrderedList ordered_list ; ///< type of ordered list used as base for split-list +# else + typedef typename wrapped_ordered_list::result ordered_list; +# endif + typedef typename ordered_list::value_type value_type ; ///< type of value stored in the split-list + typedef typename ordered_list::key_comparator key_comparator ; ///< key comparison functor + typedef typename ordered_list::disposer disposer ; ///< Node disposer functor + + typedef typename options::item_counter item_counter ; ///< Item counter type + typedef typename options::back_off back_off ; ///< back-off strategy for spinning + typedef typename options::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + + protected: + typedef typename ordered_list::node_type list_node_type ; ///< Node type as declared in ordered list + typedef split_list::node node_type ; ///< split-list node type + typedef node_type dummy_node_type ; ///< dummy node type + + /// Split-list node traits + /** + This traits is intended for converting between underlying ordered list node type \ref list_node_type + and split-list node type \ref node_type + */ + typedef split_list::node_traits node_traits; + + //@cond + /// Bucket table implementation + typedef typename split_list::details::bucket_table_selector< + options::dynamic_bucket_table + , gc + , dummy_node_type + , opt::allocator< typename options::allocator > + , opt::memory_model< memory_model > + >::type bucket_table; + + typedef typename ordered_list::iterator list_iterator; + typedef typename ordered_list::const_iterator list_const_iterator; + //@endcond + + protected: + //@cond + /// Ordered list wrapper to access protected members + class ordered_list_wrapper: public ordered_list + { + typedef ordered_list base_class; + typedef typename base_class::auxiliary_head bucket_head_type; + + public: + list_iterator insert_at_( dummy_node_type * pHead, value_type& val ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(static_cast(pHead)); + return base_class::insert_at_( h, val ); + } + + template + std::pair ensure_at_( dummy_node_type * pHead, value_type& val, Func func ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(static_cast(pHead)); + return base_class::ensure_at_( h, val, func ); + } + + template + bool find_at( dummy_node_type * pHead, split_list::details::search_value_type& val, Compare cmp, Func f ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(static_cast(pHead)); + return base_class::find_at( h, val, cmp, f ); + } + + template + list_iterator find_at_( dummy_node_type * pHead, split_list::details::search_value_type const & val, Compare cmp ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(static_cast(pHead)); + return base_class::find_at_( h, val, cmp ); + } + + bool insert_aux_node( dummy_node_type * pNode ) + { + return base_class::insert_aux_node( pNode ); + } + bool insert_aux_node( 
dummy_node_type * pHead, dummy_node_type * pNode ) + { + bucket_head_type h(static_cast(pHead)); + return base_class::insert_aux_node( h, pNode ); + } + }; + + //@endcond + + protected: + ordered_list_wrapper m_List ; ///< Ordered list containing split-list items + bucket_table m_Buckets ; ///< bucket table + CDS_ATOMIC::atomic m_nBucketCountLog2 ; ///< log2( current bucket count ) + item_counter m_ItemCounter ; ///< Item counter + hash m_HashFunctor ; ///< Hash functor + + protected: + //@cond + typedef cds::details::Allocator< dummy_node_type, typename options::allocator > dummy_node_allocator; + static dummy_node_type * alloc_dummy_node( size_t nHash ) + { + return dummy_node_allocator().New( nHash ); + } + static void free_dummy_node( dummy_node_type * p ) + { + dummy_node_allocator().Delete( p ); + } + + /// Calculates hash value of \p key + template + size_t hash_value( Q const& key ) const + { + return m_HashFunctor( key ); + } + + size_t bucket_no( size_t nHash ) const + { + return nHash & ( (1 << m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed)) - 1 ); + } + + static size_t parent_bucket( size_t nBucket ) + { + assert( nBucket > 0 ); + return nBucket & ~( 1 << bitop::MSBnz( nBucket ) ); + } + + dummy_node_type * init_bucket( size_t nBucket ) + { + assert( nBucket > 0 ); + size_t nParent = parent_bucket( nBucket ); + + dummy_node_type * pParentBucket = m_Buckets.bucket( nParent ); + if ( pParentBucket == null_ptr() ) { + pParentBucket = init_bucket( nParent ); + } + + assert( pParentBucket != null_ptr() ); + + // Allocate a dummy node for new bucket + { + dummy_node_type * pBucket = alloc_dummy_node( split_list::dummy_hash( nBucket ) ); + if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) { + m_Buckets.bucket( nBucket, pBucket ); + return pBucket; + } + free_dummy_node( pBucket ); + } + + // Another thread set the bucket. Wait while it done + + // In this point, we must wait while nBucket is empty. + // The compiler can decide that waiting loop can be "optimized" (stripped) + // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer. 
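+            // Reading the bucket head through a volatile pointer forces the load to be repeated
+            // on every iteration of the back-off loop below.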
+ // + back_off bkoff; + while ( true ) { + dummy_node_type volatile * p = m_Buckets.bucket( nBucket ); + if ( p && p != null_ptr() ) + return const_cast( p ); + bkoff(); + } + } + + dummy_node_type * get_bucket( size_t nHash ) + { + size_t nBucket = bucket_no( nHash ); + + dummy_node_type * pHead = m_Buckets.bucket( nBucket ); + if ( pHead == null_ptr() ) + pHead = init_bucket( nBucket ); + + assert( pHead->is_dummy() ); + + return pHead; + } + + void init() + { + // GC and OrderedList::gc must be the same + static_assert(( std::is_same::value ), "GC and OrderedList::gc must be the same"); + + // atomicity::empty_item_counter is not allowed as a item counter + static_assert(( !std::is_same::value ), "atomicity::empty_item_counter is not allowed as a item counter"); + + // Initialize bucket 0 + dummy_node_type * pNode = alloc_dummy_node( 0 /*split_list::dummy_hash(0)*/ ); + + // insert_aux_node cannot return false for empty list + CDS_VERIFY( m_List.insert_aux_node( pNode )); + + m_Buckets.bucket( 0, pNode ); + } + + void inc_item_count() + { + size_t sz = m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed); + if ( ( ++m_ItemCounter >> sz ) > m_Buckets.load_factor() && ((size_t)(1 << sz )) < m_Buckets.capacity() ) + { + m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ); + } + } + + //@endcond + + public: + /// Initialize split-ordered list of default capacity + /** + The default capacity is defined in bucket table constructor. + See split_list::expandable_bucket_table, split_list::static_ducket_table + which selects by split_list::dynamic_bucket_table option. + */ + SplitListSet() + : m_nBucketCountLog2(1) + { + init(); + } + + /// Initialize split-ordered list + SplitListSet( + size_t nItemCount ///< estimate average of item count + , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 10, default is 1. + ) + : m_Buckets( nItemCount, nLoadFactor ) + , m_nBucketCountLog2(1) + { + init(); + } + + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert_( val ) != end(); + } + + /// Ensures that the \p item exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the set, then \p val is inserted into the set. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + struct ensure_functor { + void operator()( bool bNew, value_type& item, value_type& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refers to the same thing. + + The functor can change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You can pass \p func argument by value by reference using boost::ref or cds::ref. + + Returns std::pair where \p first is \p true if operation is successfull, + \p second is \p true if new item has been added or \p false if the item with given key + already is in the set. 
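+
+            For illustration only (the field \p nPayload and the set instance \p theSet are
+            assumptions, not defined by this class), an ensure functor might look like:
+            \code
+            struct my_ensure_functor {
+                void operator()( bool bNew, value_type& item, value_type& val )
+                {
+                    if ( !bNew )
+                        item.nPayload = val.nPayload;   // refresh the existing item from val
+                }
+            };
+            // ...
+            std::pair<bool, bool> res = theSet.ensure( val, my_ensure_functor() );
+            \endcode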
+ */ + template + std::pair ensure( value_type& val, Func func ) + { + std::pair ret = ensure_( val, func ); + return std::make_pair( ret.first != end(), ret.second ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_SplitListSet_nogc_find_val + The function searches the item with key equal to \p val + and returns pointer to item found or , and \p NULL otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + value_type * find( Q const & val ) + { + iterator it = find_( val ); + if ( it == end() ) + return null_ptr(); + return &*it; + } + + /// Finds the key \p val with \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SplitListSet_nogc_find_val "find(Q const&)" + but \p cmp is used for key compare. + \p Less has the interface like \p std::less. + \p cmp must imply the same element order as the comparator used for building the set. + */ + template + value_type * find_with( Q const& val, Less pred ) + { + iterator it = find_with_( val, pred ); + if ( it == end() ) + return null_ptr(); + return &*it; + } + + /// Finds the key \p val + /** \anchor cds_intrusive_SplitListSet_nogc_find_func + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return find_( val, key_comparator(), f ); + } + + /// Finds the key \p val with \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SplitListSet_nogc_find_func "find(Q&, Func)" + but \p cmp is used for key compare. + \p Less has the interface like \p std::less. + \p cmp must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Less pred, Func f ) + { + return find_( val, typename wrapped_ordered_list::template make_compare_from_less(), f ); + } + + /// Find the key \p val + /** \anchor cds_intrusive_SplitListSet_nogc_find_cfunc + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. + The functor does not serialize simultaneous access to the set \p item. 
If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return find_( val, key_comparator(), f ); + } + + /// Finds the key \p val with \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SplitListSet_nogc_find_cfunc "find(Q const&, Func)" + but \p cmp is used for key compare. + \p Less has the interface like \p std::less. + \p cmp must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred, Func f ) + { + return find_( val, typename wrapped_ordered_list::template make_compare_from_less(), f ); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, the correct item counting feature is an important part of split-list implementation. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + size_t size() const + { + return m_ItemCounter; + } + + protected: + //@cond + template + class iterator_type + :public split_list::details::iterator_type + { + typedef split_list::details::iterator_type iterator_base_class; + typedef typename iterator_base_class::list_iterator list_iterator; + public: + iterator_type() + : iterator_base_class() + {} + + iterator_type( iterator_type const& src ) + : iterator_base_class( src ) + {} + + // This ctor should be protected... + iterator_type( list_iterator itCur, list_iterator itEnd ) + : iterator_base_class( itCur, itEnd ) + {} + }; + //@endcond + + public: + /// Forward iterator + /** + The forward iterator for a split-list has some features: + - it has no post-increment operator + - it depends on iterator of underlying \p OrderedList + */ + typedef iterator_type iterator; + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a split-list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_List.begin(), m_List.end() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a split-list + /** + Do not use the value returned by end function to access any item. + + The returned value can be used only to control reaching the end of the split-list. 
+ For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_List.end(), m_List.end() ); + } + + /// Returns a forward const iterator addressing the first element in a split-list + //@{ + const_iterator begin() const + { + return const_iterator( m_List.begin(), m_List.end() ); + } + const_iterator cbegin() + { + return const_iterator( m_List.cbegin(), m_List.cend() ); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a split-list + //@{ + const_iterator end() const + { + return const_iterator( m_List.end(), m_List.end() ); + } + const_iterator cend() + { + return const_iterator( m_List.cend(), m_List.cend() ); + } + //@} + + protected: + //@cond + iterator insert_( value_type& val ) + { + size_t nHash = hash_value( val ); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); + + list_iterator it = m_List.insert_at_( pHead, val ); + if ( it != m_List.end() ) { + inc_item_count(); + return iterator( it, m_List.end() ); + } + return end(); + } + + template + std::pair ensure_( value_type& val, Func func ) + { + size_t nHash = hash_value( val ); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); + + std::pair ret = m_List.ensure_at_( pHead, val, func ); + if ( ret.first != m_List.end() ) { + if ( ret.second ) + inc_item_count(); + return std::make_pair( iterator(ret.first, m_List.end()), ret.second ); + } + return std::make_pair( end(), ret.second ); + } + + template + iterator find_with_( Q const& val, Less pred ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + return iterator( m_List.find_at_( pHead, sv, typename wrapped_ordered_list::template make_compare_from_less() ), m_List.end() ); + } + + template + iterator find_( Q const& val ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + return iterator( m_List.find_at_( pHead, sv, key_comparator() ), m_List.end() ); + + } + + template + bool find_( Q& val, Compare cmp, Func f ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return m_List.find_at( pHead, sv, cmp, + [&f](value_type& item, split_list::details::search_value_type& val){ cds::unref(f)(item, val.val ); }); +# else + split_list::details::find_functor_wrapper ffw( f ); + return m_List.find_at( pHead, sv, cmp, cds::ref(ffw) ); +# endif + } + + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_SPLIT_LIST_NOGC_H diff --git a/cds/intrusive/split_list_rcu.h b/cds/intrusive/split_list_rcu.h new file mode 100644 index 00000000..2d655c1c --- /dev/null +++ b/cds/intrusive/split_list_rcu.h @@ -0,0 +1,1011 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_SPLIT_LIST_RCU_H +#define __CDS_INTRUSIVE_SPLIT_LIST_RCU_H + +#include +#include + +namespace cds { namespace intrusive { + + /// Split-ordered list RCU specialization + /** @ingroup 
cds_intrusive_map + \anchor cds_intrusive_SplitListSet_rcu + + Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see + - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables" + - [2008] Nir Shavit "The Art of Multiprocessor Programming" + + The split-ordered list is a lock-free implementation of an extensible unbounded hash table. It uses original + recursive split-ordering algorithm discovered by Ori Shalev and Nir Shavit that allows to split buckets + without moving an item on resizing, see \ref cds_SplitList_algo_desc "short algo description". + + Implementation + + Template parameters are: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p OrderedList - ordered list implementation used as bucket for hash set, for example, MichaelList, LazyList. + The intrusive ordered list implementation specifies the type \p T stored in the hash-set, + the comparing functor for the type \p T and other features specific for the ordered list. + - \p Traits - type traits. See split_list::type_traits for explanation. + Instead of defining \p Traits struct you may use option-based syntax with split_list::make_traits metafunction. + + @note About features of hash functor needed for \p %SplitList see \ref cds_SplitList_hash_functor "SplitList general description". + + \par How to use + Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" and + MichaelList-based split-list you should include: + \code + #include + #include + #include + + // Declare Michael's list for type Foo and default traits: + typedef cds::intrusive::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, Foo > rcu_michael_list; + + // Declare split-list based on rcu_michael_list + typedef cds::intrusive::SplitListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, rcu_michael_list > rcu_split_list; + \endcode + + */ + template < + class RCU, + class OrderedList, +# ifdef CDS_DOXYGEN_INVOKED + class Traits = split_list::type_traits +# else + class Traits +# endif + > + class SplitListSet< cds::urcu::gc< RCU >, OrderedList, Traits > + { + public: + typedef Traits options ; ///< Traits template parameters + typedef cds::urcu::gc< RCU > gc ; ///< RCU garbage collector + + /// Hash functor for \ref value_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename options::hash >::type hash; + + protected: + //@cond + typedef split_list::details::rebind_list_options wrapped_ordered_list; + //@endcond + + public: +# ifdef CDS_DOXYGEN_INVOKED + typedef OrderedList ordered_list ; ///< type of ordered list used as base for split-list +# else + typedef typename wrapped_ordered_list::result ordered_list; +# endif + typedef typename ordered_list::value_type value_type ; ///< type of value stored in the split-list + typedef typename ordered_list::key_comparator key_comparator ; ///< key compare functor + typedef typename ordered_list::disposer disposer ; ///< Node disposer functor + typedef typename ordered_list::rcu_lock rcu_lock ; ///< RCU scoped lock + typedef typename ordered_list::exempt_ptr exempt_ptr ; ///< pointer to extracted node + /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that + static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = 
ordered_list::c_bExtractLockExternal; + + typedef typename options::item_counter item_counter ; ///< Item counter type + typedef typename options::back_off back_off ; ///< back-off strategy for spinning + typedef typename options::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + + protected: + typedef typename ordered_list::node_type list_node_type ; ///< Node type as declared in ordered list + typedef split_list::node node_type ; ///< split-list node type + typedef node_type dummy_node_type ; ///< dummy node type + + /// Split-list node traits + /** + This traits is intended for converting between underlying ordered list node type \ref list_node_type + and split-list node type \ref node_type + */ + typedef split_list::node_traits node_traits; + + //@cond + /// Bucket table implementation + typedef typename split_list::details::bucket_table_selector< + options::dynamic_bucket_table + , gc + , dummy_node_type + , opt::allocator< typename options::allocator > + , opt::memory_model< memory_model > + >::type bucket_table; + + //@endcond + + protected: + //@cond + /// Ordered list wrapper to access protected members of OrderedList + class ordered_list_wrapper: public ordered_list + { + typedef ordered_list base_class; + typedef typename base_class::auxiliary_head bucket_head_type; + + public: + bool insert_at( dummy_node_type * pHead, value_type& val ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::insert_at( h, val ); + } + + template + bool insert_at( dummy_node_type * pHead, value_type& val, Func f ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::insert_at( h, val, f ); + } + + template + std::pair ensure_at( dummy_node_type * pHead, value_type& val, Func func ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::ensure_at( h, val, func ); + } + + bool unlink_at( dummy_node_type * pHead, value_type& val ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::unlink_at( h, val ); + } + + template + bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp, Func f ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::erase_at( h, val, cmp, f ); + } + + template + bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::erase_at( h, val, cmp ); + } + + template + value_type * extract_at( dummy_node_type * pHead, split_list::details::search_value_type& val, Compare cmp ) + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::extract_at( h, val, cmp ); + } + + template + bool find_at( dummy_node_type * pHead, split_list::details::search_value_type& val, Compare cmp, Func f ) const + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::find_at( h, val, cmp, f ); + } + + template + bool find_at( dummy_node_type * pHead, split_list::details::search_value_type const & val, Compare cmp ) const + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::find_at( h, val, cmp ); + } + + template + value_type * get_at( dummy_node_type * pHead, split_list::details::search_value_type& val, Compare cmp ) const + { + assert( pHead != null_ptr() ); + bucket_head_type h(pHead); + return base_class::get_at( h, val, cmp ); + } + + 
bool insert_aux_node( dummy_node_type * pNode ) + { + return base_class::insert_aux_node( pNode ); + } + bool insert_aux_node( dummy_node_type * pHead, dummy_node_type * pNode ) + { + bucket_head_type h(pHead); + return base_class::insert_aux_node( h, pNode ); + } + }; + + template + struct less_wrapper: public cds::opt::details::make_comparator_from_less + { + typedef cds::opt::details::make_comparator_from_less base_wrapper; + + template + int operator()( split_list::details::search_value_type const& v1, Q2 const& v2 ) const + { + return base_wrapper::operator()( v1.val, v2 ); + } + + template + int operator()( Q1 const& v1, split_list::details::search_value_type const& v2 ) const + { + return base_wrapper::operator()( v1, v2.val ); + } + }; + //@endcond + + protected: + ordered_list_wrapper m_List ; ///< Ordered list containing split-list items + bucket_table m_Buckets ; ///< bucket table + CDS_ATOMIC::atomic m_nBucketCountLog2 ; ///< log2( current bucket count ) + item_counter m_ItemCounter ; ///< Item counter + hash m_HashFunctor ; ///< Hash functor + + protected: + //@cond + typedef cds::details::Allocator< dummy_node_type, typename options::allocator > dummy_node_allocator; + static dummy_node_type * alloc_dummy_node( size_t nHash ) + { + return dummy_node_allocator().New( nHash ); + } + static void free_dummy_node( dummy_node_type * p ) + { + dummy_node_allocator().Delete( p ); + } + + /// Calculates hash value of \p key + template + size_t hash_value( Q const& key ) const + { + return m_HashFunctor( key ); + } + + size_t bucket_no( size_t nHash ) const + { + return nHash & ( (1 << m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed)) - 1 ); + } + + static size_t parent_bucket( size_t nBucket ) + { + assert( nBucket > 0 ); + return nBucket & ~( 1 << bitop::MSBnz( nBucket ) ); + } + + dummy_node_type * init_bucket( size_t nBucket ) + { + assert( nBucket > 0 ); + size_t nParent = parent_bucket( nBucket ); + + dummy_node_type * pParentBucket = m_Buckets.bucket( nParent ); + if ( pParentBucket == null_ptr() ) { + pParentBucket = init_bucket( nParent ); + } + + assert( pParentBucket != null_ptr() ); + + // Allocate a dummy node for new bucket + { + dummy_node_type * pBucket = alloc_dummy_node( split_list::dummy_hash( nBucket ) ); + if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) { + m_Buckets.bucket( nBucket, pBucket ); + return pBucket; + } + free_dummy_node( pBucket ); + } + + // Another thread set the bucket. Wait while it done + + // In this point, we must wait while nBucket is empty. + // The compiler can decide that waiting loop can be "optimized" (stripped) + // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer. 
+ // + back_off bkoff; + while ( true ) { + dummy_node_type volatile * p = m_Buckets.bucket( nBucket ); + if ( p != null_ptr() ) + return const_cast( p ); + bkoff(); + } + } + + dummy_node_type * get_bucket( size_t nHash ) + { + size_t nBucket = bucket_no( nHash ); + + dummy_node_type * pHead = m_Buckets.bucket( nBucket ); + if ( pHead == null_ptr() ) + pHead = init_bucket( nBucket ); + + assert( pHead->is_dummy() ); + + return pHead; + } + + void init() + { + // GC and OrderedList::gc must be the same + static_assert(( std::is_same::value ), "GC and OrderedList::gc must be the same"); + + // atomicity::empty_item_counter is not allowed as a item counter + static_assert(( !std::is_same::value ), "atomicity::empty_item_counter is not allowed as a item counter"); + + // Initialize bucket 0 + dummy_node_type * pNode = alloc_dummy_node( 0 /*split_list::dummy_hash(0)*/ ); + + // insert_aux_node cannot return false for empty list + CDS_VERIFY( m_List.insert_aux_node( pNode )); + + m_Buckets.bucket( 0, pNode ); + } + + void inc_item_count() + { + size_t sz = m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed); + if ( ( ++m_ItemCounter >> sz ) > m_Buckets.load_factor() && ((size_t)(1 << sz )) < m_Buckets.capacity() ) + { + m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ); + } + } + + template + bool find_( Q& val, Compare cmp, Func f ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return m_List.find_at( pHead, sv, cmp, + [&f](value_type& item, split_list::details::search_value_type& val){ cds::unref(f)(item, val.val ); }); +# else + split_list::details::find_functor_wrapper ffw( f ); + return m_List.find_at( pHead, sv, cmp, cds::ref(ffw) ); +# endif + } + + template + bool find_value( Q const& val, Compare cmp ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + return m_List.find_at( pHead, sv, cmp ); + } + + template + value_type * get_( Q const& val, Compare cmp ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + return m_List.get_at( pHead, sv, cmp ); + } + + template + value_type * extract_( Q const& val, Compare cmp ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + value_type * pNode = m_List.extract_at( pHead, sv, cmp ); + if ( pNode ) + --m_ItemCounter; + return pNode; + } + + template + value_type * extract_with_( Q const& val, Less pred ) + { + return extract_( val, typename wrapped_ordered_list::template make_compare_from_less()); + } + + template + bool erase_( const Q& val, Compare cmp ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + if ( m_List.erase_at( pHead, sv, cmp ) ) { + --m_ItemCounter; + return true; + } + return false; + } + + template + bool erase_( Q const& 
val, Compare cmp, Func f ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + if ( m_List.erase_at( pHead, sv, cmp, f )) { + --m_ItemCounter; + return true; + } + return false; + } + + //@endcond + + public: + /// Initialize split-ordered list of default capacity + /** + The default capacity is defined in bucket table constructor. + See split_list::expandable_bucket_table, split_list::static_ducket_table + which selects by split_list::dynamic_bucket_table option. + */ + SplitListSet() + : m_nBucketCountLog2(1) + { + init(); + } + + /// Initialize split-ordered list + SplitListSet( + size_t nItemCount ///< estimate average of item count + , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 8, default is 1. + ) + : m_Buckets( nItemCount, nLoadFactor ) + , m_nBucketCountLog2(1) + { + init(); + } + + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + The function makes RCU lock internally. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + size_t nHash = hash_value( val ); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); + + if ( m_List.insert_at( pHead, val )) { + inc_item_count(); + return true; + } + return false; + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success and may be passed by reference + using boost::ref + + The function makes RCU lock internally. + */ + template + bool insert( value_type& val, Func f ) + { + size_t nHash = hash_value( val ); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); + + if ( m_List.insert_at( pHead, val, f )) { + inc_item_count(); + return true; + } + return false; + } + + /// Ensures that the \p val exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted into the set. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refers to the same thing. 
+ + The functor can change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + You can pass \p func argument by value or by reference using boost::ref or cds::ref. + + The function makes RCU lock internally. + + Returns std::pair where \p first is \p true if operation is successfull, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the set. + */ + template + std::pair ensure( value_type& val, Func func ) + { + size_t nHash = hash_value( val ); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); + + std::pair bRet = m_List.ensure_at( pHead, val, func ); + if ( bRet.first && bRet.second ) + inc_item_count(); + return bRet; + } + + /// Unlinks the item \p val from the set + /** + The function searches the item \p val in the set and unlinks it from the set + if it is found and is equal to \p val. + + Difference between \ref erase and \p unlink functions: \p erase finds a key + and deletes the item found. \p unlink finds an item by key and deletes it + only if \p val is an item of that set, i.e. the pointer to item found + is equal to &val . + + RCU \p synchronize method can be called, therefore, RCU should not be locked. + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + size_t nHash = hash_value( val ); + dummy_node_type * pHead = get_bucket( nHash ); + assert( pHead != null_ptr() ); + + if ( m_List.unlink_at( pHead, val ) ) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_SplitListSet_rcu_erase + The function searches an item with key equal to \p val in the set, + unlinks it from the set, and returns \p true. + If the item with key equal to \p val is not found the function return \p false. + + Difference between \ref erase and \p unlink functions: \p erase finds a key + and deletes the item found. \p unlink finds an item by key and deletes it + only if \p val is an item of that set, i.e. the pointer to item found + is equal to &val . + + RCU \p synchronize method can be called, therefore, RCU should not be locked. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool erase( Q const& val ) + { + return erase_( val, key_comparator() ); + } + + /// Deletes the item from the set using \p pred for searching + /** + The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase "erase(Q const&)" + but \p cmp is used for key compare. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& val, Less pred ) + { + return erase_( val, typename wrapped_ordered_list::template make_compare_from_less() ); + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_SplitListSet_rcu_erase_func + The function searches an item with key equal to \p val in the set, + call \p f functor with item found, unlinks it from the set, and returns \p true. + The \ref disposer specified by \p OrderedList class template parameter is called + by garbage collector \p GC asynchronously. 
+ + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + The functor can be passed by reference with boost:ref + + If the item with key equal to \p val is not found the function return \p false. + + RCU \p synchronize method can be called, therefore, RCU should not be locked. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool erase( Q const& val, Func f ) + { + return erase_( val, key_comparator(), f ); + } + + /// Deletes the item from the set using \p pred for searching + /** + The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase_func "erase(Q const&, Func)" + but \p cmp is used for key compare. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& val, Less pred, Func f ) + { + return erase_( val, typename wrapped_ordered_list::template make_compare_from_less(), f ); + } + + /// Extracts an item from the set + /** \anchor cds_intrusive_SplitListSet_rcu_extract + The function searches an item with key equal to \p val in the set, + unlinks it, and returns pointer to an item found in \p dest argument. + If the item with the key equal to \p val is not found the function returns \p false. + + @note The function does NOT call RCU read-side lock or synchronization, + and does NOT dispose the item found. It just excludes the item from the set + and returns a pointer to item found. + You should lock RCU before calling of the function, and you should synchronize RCU + outside the RCU lock before reusing returned pointer. + + \code + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::intrusive::MichaelList< rcu, Foo > rcu_michael_list; + typedef cds::intrusive::SplitListSet< rcu, rcu_michael_list, foo_traits > rcu_splitlist_set; + + rcu_splitlist_set theSet; + // ... + + rcu_splitlist_set::exempt_ptr p; + { + // first, we should lock RCU + rcu_splitlist_set::rcu_lock lock; + + // Now, you can apply extract function + // Note that you must not delete the item found inside the RCU lock + if ( theList.extract( p, 10 )) { + // do something with p + ... + } + } + + // We may safely release p here + // release() passes the pointer to RCU reclamation cycle: + // it invokes RCU retire_ptr function with the disposer you provided for rcu_michael_list. + p.release(); + \endcode + */ + template + bool extract( exempt_ptr& dest, Q const& val ) + { + value_type * pNode = extract_( val, key_comparator() ); + if ( pNode ) { + dest = pNode; + return true; + } + return false; + } + + /// Extracts an item from the set using \p pred for searching + /** + The function is an analog of \ref cds_intrusive_SplitListSet_rcu_extract "extract(exempt_ptr&, Q const&)" + but \p pred is used for key compare. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool extract_with( exempt_ptr& dest, Q const& val, Less pred ) + { + value_type * pNode = extract_with_( val, pred ); + if ( pNode ) { + dest = pNode; + return true; + } + return false; + } + + /// Finds the key \p val + /** \anchor cds_intrusive_SplitListSet_rcu_find_func + The function searches the item with key equal to \p val and calls the functor \p f for item found. 
+ The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function applies RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return find_( val, key_comparator(), f ); + } + + /// Finds the key \p val with \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_func "find(Q&, Func)" + but \p cmp is used for key compare. + \p Less has the interface like \p std::less. + \p cmp must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Less pred, Func f ) + { + return find_( val, typename wrapped_ordered_list::template make_compare_from_less(), f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_SplitListSet_rcu_find_cfunc + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by value or by reference using boost::ref or cds::ref. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function applies RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return find_( val, key_comparator(), f ); + } + + /// Finds the key \p val with \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_cfunc "find(Q const&, Func)" + but \p cmp is used for key compare. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. 
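+
+            A sketch of such a predicate and functor (the field \p nKey and the names \p foo_less,
+            \p foo_functor, \p theSet are illustrative assumptions only):
+            \code
+            struct foo_less {
+                bool operator()( value_type const& item, int nKey ) const { return item.nKey < nKey; }
+                bool operator()( int nKey, value_type const& item ) const { return nKey < item.nKey; }
+            };
+            struct foo_functor {
+                void operator()( value_type& item, int const& nKey ) { /* deal with the item found */ }
+            };
+            // ...
+            bool bFound = theSet.find_with( 42, foo_less(), foo_functor() );
+            \endcode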
+ */ + template + bool find_with( Q const& val, Less pred, Func f ) + { + return find_( val, typename wrapped_ordered_list::template make_compare_from_less(), f ); + } + + /// Finds the key \p val + /** \anchor cds_intrusive_SplitListSet_rcu_find_val + The function searches the item with key equal to \p val + and returns \p true if \p val found or \p false otherwise. + */ + template + bool find( Q const& val ) + { + return find_value( val, key_comparator() ); + } + + /// Finds the key \p val with \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_val "find(Q const&)" + but \p cmp is used for key compare. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred ) + { + return find_value( val, typename wrapped_ordered_list::template make_compare_from_less() ); + } + + /// Finds the key \p val and return the item found + /** \anchor cds_intrusive_SplitListSet_rcu_get + The function searches the item with key equal to \p val and returns the pointer to item found. + If \p val is not found it returns \p NULL. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + cds::intrusive::SplitListSet< your_template_parameters > theSet; + // ... + { + // Lock RCU + hash_set::rcu_lock lock; + + foo * pVal = theSet.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + // Unlock RCU by rcu_lock destructor + // pVal can be retired by disposer at any time after RCU has been unlocked + } + \endcode + */ + template + value_type * get( Q const& val ) + { + return get_( val, key_comparator() ); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_intrusive_SplitListSet_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + value_type * get_with( Q const& val, Less pred ) + { + return get_( val, typename wrapped_ordered_list::template make_compare_from_less()); + } + + + /// Returns item count in the set + size_t size() const + { + return m_ItemCounter; + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, the correct item counting feature is an important part of split-list set implementation. + */ + bool empty() const + { + return size() == 0; + } + + /// Clears the set (non-atomic) + /** + The function unlink all items from the set. + The function is not atomic. Therefore, \p clear may be used only for debugging purposes. 
+ */ + void clear() + { + iterator it = begin(); + while ( it != end() ) { + iterator i(it); + ++i; + unlink( *it ); + it = i; + } + } + + protected: + //@cond + template + class iterator_type + :public split_list::details::iterator_type + { + typedef split_list::details::iterator_type iterator_base_class; + typedef typename iterator_base_class::list_iterator list_iterator; + public: + iterator_type() + : iterator_base_class() + {} + + iterator_type( iterator_type const& src ) + : iterator_base_class( src ) + {} + + // This ctor should be protected... + iterator_type( list_iterator itCur, list_iterator itEnd ) + : iterator_base_class( itCur, itEnd ) + {} + }; + //@endcond + public: + /// Forward iterator + /** + The forward iterator for a split-list has some features: + - it has no post-increment operator + - it depends on iterator of underlying \p OrderedList + - The iterator cannot be moved across thread boundary since it may contain GC's guard that is thread-private GC data. + - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the split-list. + + Therefore, the use of iterators in concurrent environment is not good idea. Use the iterator on the concurrent container + for debug purpose only. + */ + typedef iterator_type iterator; + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a split-list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_List.begin(), m_List.end() ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a split-list + /** + Do not use the value returned by end function to access any item. + + The returned value can be used only to control reaching the end of the split-list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_List.end(), m_List.end() ); + } + + /// Returns a forward const iterator addressing the first element in a split-list + const_iterator begin() const + { + return const_iterator( m_List.begin(), m_List.end() ); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a split-list + const_iterator end() const + { + return const_iterator( m_List.end(), m_List.end() ); + } + + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_SPLIT_LIST_RCU_H diff --git a/cds/intrusive/striped_set.h b/cds/intrusive/striped_set.h new file mode 100644 index 00000000..0c1a1acc --- /dev/null +++ b/cds/intrusive/striped_set.h @@ -0,0 +1,894 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_STRIPED_SET_H +#define __CDS_INTRUSIVE_STRIPED_SET_H + +#include +#include +#include + +namespace cds { namespace intrusive { + /// StripedSet related definitions + namespace striped_set { + } // namespace striped_set + + /// Striped hash set + /** @ingroup cds_intrusive_map + + Source + - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming" + + Lock striping is very simple technique. + The set consists of the bucket table and the array of locks. + Initially, the capacity of lock array and bucket table is the same. + When set is resized, bucket table capacity will be doubled but lock array will not. 
+ The lock \p i protects each bucket \p j, where j = i mod L , + where \p L - the size of lock array. + + Template arguments: + - \p Container - the container class that is used as bucket table entry. The \p Container class should support + an uniform interface described below. + - \p Options - options + + The \p %StripedSet class does not exactly dictate the type of container that should be used as a \p Container bucket. + Instead, the class supports different intrusive container type for the bucket, for exampe, \p boost::intrusive::list, \p boost::intrusive::set and others. + + Remember that \p %StripedSet class algorithm ensures sequential blocking access to its bucket through the mutex type you specify + among \p Options template arguments. + + The \p Options are: + - opt::mutex_policy - concurrent access policy. + Available policies: striped_set::striping, striped_set::refinable. + Default is striped_set::striping. + - cds::opt::hash - hash functor. Default option value see opt::v::hash_selector which selects default hash functor for + your compiler. + - cds::opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - cds::opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - cds::opt::item_counter - item counter type. Default is \p atomicity::item_counter since some operation on the counter is performed + without locks. Note that item counting is an essential part of the set algorithm, so dummy type like atomicity::empty_item_counter + is not suitable. + - cds::opt::allocator - the allocator type using for memory allocation of bucket table and lock array. Default is CDS_DEFAULT_ALLOCATOR. + - cds::opt::resizing_policy - the resizing policy that is a functor that decides when to resize the hash set. + Default option value depends on bucket container type: + for sequential containers like \p boost::intrusive::list the resizing policy is cds::container::striped_set::load_factor_resizing <4>; + for other type of containers like \p boost::intrusive::set the resizing policy is cds::container::striped_set::no_resizing. + See cds::container::striped_set namespace for list of all possible types of the option. + Note that the choose of resizing policy depends of \p Container type: + for sequential containers like \p boost::intrusive::list right choosing of the policy can significantly improve performance. + For other, non-sequential types of \p Container (like a \p boost::intrusive::set) the resizing policy is not so important. + - cds::opt::buffer - a buffer type used only for boost::intrusive::unordered_set. + Default is cds::opt::v::static_buffer< cds::any_type, 256 >. + + opt::compare or opt::less options are used in some \p Container class for ordering. + opt::compare option has the highest priority: if opt::compare is specified, opt::less is not used. + + You can pass other option that would be passed to adapt metafunction, see below. + + Internal details + + The \p %StripedSet class cannot utilize the \p Container container specified directly, but only its adapted variant which + supports an unified interface. Internally, the adaptation is made via intrusive::striped_set::adapt metafunction that wraps bucket container + and provides the unified bucket interface suitable for \p %StripedSet. 
Such adaptation is completely transparent for you - + you don't need to call \p adapt metafunction directly, \p %StripedSet class's internal machinery itself invokes appropriate + \p adapt metafunction specialization to adjust your \p Container container class to \p %StripedSet bucket's internal interface. + All you need is to include a right header before striped_set.h. + + By default, intrusive::striped_set::adapt metafunction does not make any wrapping to \p AnyContainer, + so, the result intrusive::striped_set::adapt::type is the same as \p AnyContainer. + However, there are a lot of specializations of \p %intrusive::striped_set::adapt for \p boost::intrusive containers, see table below. + Any of this specialization wraps corresponding container making it suitable for the set's bucket. + Remember, you should include the proper header file for \p adapt before including striped_set.h. + + \note It is important to specify boost::intrusive::constant_time_size option + for all \p boost::intrusive container that supports this option. Fast item counting feature is essential part of + \p %StripedSet resizing algorithm. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Container | .h-file for \p adapt | Example | Notes
\p boost::intrusive::list\code + #include + #include + typedef cds::intrusive::StripedSet< + boost::intrusive::list >, + cds::opt::less< std::less > + > striped_set; + \endcode + + The list is ordered. + Template argument pack \p Options must contain cds::opt::less or cds::opt::compare for type \p T stored in the list +
\p boost::intrusive::slist\code + #include + #include + typedef cds::intrusive::StripedSet< + boost::intrusive::slist >, + cds::opt::less< std::less > + > striped_set; + \endcode + + The list is ordered. + Template argument pack \p Options must contain cds::opt::less or cds::opt::compare for type \p T stored in the list +
\p boost::intrusive::set\code + #include + #include + typedef cds::intrusive::StripedSet< + boost::intrusive::set > + > striped_set; + \endcode + + Note that \p boost::intrusive::compare option using in \p boost::intrusive::set + should support \p T type stored in the set and any type \p Q that you can use + in \p erase and \p find member functions. +
\p boost::intrusive::unordered_set\code + #include + #include + typedef cds::intrusive::StripedSet< + boost::intrusive::unordered_set + ,boost::intrusive::hash< user_provided_hash_functor > + > + > striped_set; + \endcode + + You should provide two different hash function h1 and h2 - one for boost::intrusive::unordered_set + and other for %StripedSet. For the best result, h1 and h2 must be orthogonal i.e. h1(X) != h2(X) for any value X + + The option opt::buffer is used for boost::intrusive::bucket_traits. Default is cds::opt::v::static_buffer< cds::any_type, 256 >. + The resizing policy should correlate with the buffer capacity. + The default resizing policy is cds::container::striped_set::load_factor_resizing<256> what gives load factor 1 for + default bucket buffer that is the best for \p boost::intrusive::unordered_set. +
\p boost::intrusive::avl_set\code + #include + #include + typedef cds::intrusive::StripedSet< + boost::intrusive::avl_set > + > striped_set; + \endcode + + Note that \p boost::intrusive::compare option using in \p boost::intrusive::avl_set + should support \p T type stored in the set and any type \p Q that you can use + in \p erase and \p find member functions. +
\p boost::intrusive::sg_set\code + #include + #include + typedef cds::intrusive::StripedSet< + boost::intrusive::sg_set > + > striped_set; + \endcode + + Note that \p boost::intrusive::compare option using in \p boost::intrusive::sg_set + should support \p T type stored in the set and any type \p Q that you can use + in \p erase and \p find member functions. +
\p boost::intrusive::splay_set\code + #include + #include + typedef cds::intrusive::StripedSet< + boost::intrusive::splay_set > + > striped_set; + \endcode + + Note that \p boost::intrusive::compare option using in \p boost::intrusive::splay_set + should support \p T type stored in the set and any type \p Q that you can use + in \p erase and \p find member functions. +
\p boost::intrusive::treap_set\code + #include + #include + typedef cds::intrusive::StripedSet< + boost::intrusive::treap_set > + > striped_set; + \endcode + + Note that \p boost::intrusive::compare option using in \p boost::intrusive::treap_set + should support \p T type stored in the set and any type \p Q that you can use + in \p erase and \p find member functions. +
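+
+        For example, a set built over \p boost::intrusive::list might be declared as in the following
+        sketch (here \p my_item, \p my_hash and \p my_less are illustrative user-side types, not part of the library):
+        \code
+        #include <boost/intrusive/list.hpp>
+        #include <cds/intrusive/striped_set/boost_list.h>
+        #include <cds/intrusive/striped_set.h>
+
+        // Item type with an embedded list hook
+        struct my_item: public boost::intrusive::list_base_hook<>
+        {
+            int     nKey;       // key field
+            int     nVal;       // payload
+        };
+
+        // Hash functor for my_item and for a bare key
+        struct my_hash {
+            size_t operator()( my_item const& v ) const { return (size_t) v.nKey * 2654435761u; }
+            size_t operator()( int k ) const            { return (size_t) k * 2654435761u; }
+        };
+
+        // Ordering predicate used by the list bucket
+        struct my_less {
+            bool operator()( my_item const& a, my_item const& b ) const { return a.nKey < b.nKey; }
+            bool operator()( my_item const& a, int k ) const            { return a.nKey < k; }
+            bool operator()( int k, my_item const& b ) const            { return k < b.nKey; }
+        };
+
+        typedef cds::intrusive::StripedSet<
+            boost::intrusive::list< my_item, boost::intrusive::constant_time_size<true> >
+            ,cds::opt::hash< my_hash >
+            ,cds::opt::less< my_less >
+        > my_striped_set;
+
+        my_striped_set theSet( 64 );    // initial capacity 64, must be a power of two
+        my_item * p = new my_item;
+        p->nKey = 10;
+        theSet.insert( *p );            // the set does not copy the item, it links the node itself
+        \endcode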
+ + You can use another intrusive container type as striped set's bucket. + Suppose, you have a container class \p MyBestContainer and you want to integrate it with \p StripedSet as bucket type. + There are two possibility: + - either your \p MyBestContainer class has native support of bucket's interface; + in this case, you can use default \p intrusive::striped_set::adapt metafunction; + - or your \p MyBestContainer class does not support bucket's interface, which means, that you should develop a specialization + cds::intrusive::striped_set::adapt metafunction providing necessary interface. + + The intrusive::striped_set::adapt< Container, OptionPack > metafunction has two template argument: + - \p Container is the class that should be used as the bucket, for example, boost::intrusive::list< T >. + - \p OptionPack is the packed options from \p %StripedSet declaration. The \p adapt metafunction can use + any option from \p OptionPack for its internal use. For example, a \p compare option can be passed to \p adapt + metafunction via \p OptionPack argument of \p %StripedSet declaration. + + See intrusive::striped_set::adapt metafunction for the description of interface that the bucket container must provide + to be \p %StripedSet compatible. + */ + template + class StripedSet + { + public: + //@cond + struct default_options { + typedef striped_set::striping<> mutex_policy; + typedef typename cds::opt::v::hash_selector< cds::opt::none >::type hash; + typedef cds::atomicity::item_counter item_counter; + typedef CDS_DEFAULT_ALLOCATOR allocator; + typedef cds::opt::none resizing_policy; + typedef cds::opt::none compare; + typedef cds::opt::none less; + }; + + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< default_options, CDS_OPTIONS9 >::type + ,CDS_OPTIONS9 + >::type options; + //@endcond + + typedef Container underlying_container_type ; ///< original intrusive container type for the bucket + typedef typename cds::intrusive::striped_set::adapt< underlying_container_type, CDS_OPTIONS9 >::type bucket_type ; ///< container type adapted for hash set + typedef typename bucket_type::value_type value_type ; ///< value type stored in the set + + typedef typename options::hash hash ; ///< Hash functor + typedef typename options::item_counter item_counter ; ///< Item counter + typedef typename cds::opt::select_default< + typename options::resizing_policy, + typename bucket_type::default_resizing_policy + >::type resizing_policy ; ///< Resizing policy + typedef typename options::allocator allocator_type ; ///< allocator type specified in options. + typedef typename options::mutex_policy mutex_policy ; ///< Mutex policy + + typedef cds::details::Allocator< bucket_type, allocator_type > bucket_allocator; ///< bucket allocator type based on allocator_type + + protected: + bucket_type * m_Buckets ; ///< Bucket table + size_t m_nBucketMask ; ///< Bucket table size - 1. m_nBucketMask + 1 should be power of two. 
+ item_counter m_ItemCounter ; ///< Item counter + hash m_Hash ; ///< Hash functor + + mutex_policy m_MutexPolicy ; ///< Mutex policy + resizing_policy m_ResizingPolicy; ///< Resizing policy + + static const size_t c_nMinimalCapacity = 16 ; ///< Minimal capacity + + protected: + //@cond + typedef typename mutex_policy::scoped_cell_lock scoped_cell_lock; + typedef typename mutex_policy::scoped_full_lock scoped_full_lock; + typedef typename mutex_policy::scoped_resize_lock scoped_resize_lock; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_insert_functor { + void operator()( value_type& ) + {} + }; + + struct empty_erase_functor { + void operator()( value_type const& ) + {} + }; + + struct empty_find_functor { + template + void operator()( value_type& item, Q& val ) + {} + }; +# endif + //@endcond + + protected: + //@cond + static size_t calc_init_capacity( size_t nCapacity ) + { + nCapacity = cds::beans::ceil2( nCapacity ); + return nCapacity < c_nMinimalCapacity ? c_nMinimalCapacity : nCapacity; + } + + void alloc_bucket_table( size_t nSize ) + { + assert( cds::beans::is_power2( nSize )); + m_nBucketMask = nSize - 1; + m_Buckets = bucket_allocator().NewArray( nSize ); + } + + static void free_bucket_table( bucket_type * pBuckets, size_t nSize ) + { + bucket_allocator().Delete( pBuckets, nSize ); + } + + template + size_t hashing( Q const& v ) const + { + return m_Hash( v ); + } + + bucket_type * bucket( size_t nHash ) const CDS_NOEXCEPT + { + return m_Buckets + (nHash & m_nBucketMask); + } + + template + bool find_( Q& val, Func f ) + { + size_t nHash = hashing( val ); + + scoped_cell_lock sl( m_MutexPolicy, nHash ); + return bucket( nHash )->find( val, f ); + } + + template + bool find_with_( Q& val, Less pred, Func f ) + { + size_t nHash = hashing( val ); + scoped_cell_lock sl( m_MutexPolicy, nHash ); + return bucket( nHash )->find( val, pred, f ); + } + + void internal_resize( size_t nNewCapacity ) + { + // All locks are already locked! + m_MutexPolicy.resize( nNewCapacity ); + + size_t nOldCapacity = bucket_count(); + bucket_type * pOldBuckets = m_Buckets; + + alloc_bucket_table( nNewCapacity ); + + typedef typename bucket_type::iterator bucket_iterator; + bucket_type * pEnd = pOldBuckets + nOldCapacity; + for ( bucket_type * pCur = pOldBuckets; pCur != pEnd; ++pCur ) { + bucket_iterator itEnd = pCur->end(); + bucket_iterator itNext; + for ( bucket_iterator it = pCur->begin(); it != itEnd; it = itNext ) { + itNext = it; + ++itNext; + bucket( m_Hash( *it ) )->move_item( *pCur, it ); + } + pCur->clear(); + } + + free_bucket_table( pOldBuckets, nOldCapacity ); + + m_ResizingPolicy.reset(); + } + + void resize() + { + size_t nOldCapacity = bucket_count(); + size_t volatile& refBucketMask = m_nBucketMask; + + scoped_resize_lock al( m_MutexPolicy ); + if ( al.success() ) { + if ( nOldCapacity != refBucketMask + 1 ) { + // someone resized already + return; + } + + internal_resize( nOldCapacity * 2 ); + } + } + + //@endcond + + public: + /// Default ctor. The initial capacity is 16. + StripedSet() + : m_Buckets( null_ptr() ) + , m_nBucketMask( c_nMinimalCapacity - 1 ) + , m_MutexPolicy( c_nMinimalCapacity ) + { + alloc_bucket_table( m_nBucketMask + 1 ); + } + + /// Ctor with initial capacity specified + StripedSet( + size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. 
+ ) + : m_Buckets( null_ptr() ) + , m_nBucketMask( calc_init_capacity(nCapacity) - 1 ) + , m_MutexPolicy( m_nBucketMask + 1 ) + { + alloc_bucket_table( m_nBucketMask + 1 ); + } + + /// Ctor with resizing policy (copy semantics) + /** + This constructor initializes m_ResizingPolicy member with copy of \p resizingPolicy parameter + */ + StripedSet( + size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. + ,resizing_policy const& resizingPolicy ///< Resizing policy + ) + : m_Buckets( null_ptr() ) + , m_nBucketMask( ( nCapacity ? calc_init_capacity(nCapacity) : c_nMinimalCapacity ) - 1 ) + , m_MutexPolicy( m_nBucketMask + 1 ) + , m_ResizingPolicy( resizingPolicy ) + { + alloc_bucket_table( m_nBucketMask + 1 ); + } + +#ifdef CDS_RVALUE_SUPPORT + /// Ctor with resizing policy (move semantics) + /** + This constructor initializes m_ResizingPolicy member moving \p resizingPolicy parameter + Move semantics is used. Available only for the compilers that supports C++11 rvalue reference. + */ + StripedSet( + size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. + ,resizing_policy&& resizingPolicy ///< Resizing policy + ) + : m_Buckets( null_ptr() ) + , m_nBucketMask( ( nCapacity ? calc_init_capacity(nCapacity) : c_nMinimalCapacity ) - 1 ) + , m_MutexPolicy( m_nBucketMask + 1 ) + , m_ResizingPolicy( resizingPolicy ) + { + alloc_bucket_table( m_nBucketMask + 1 ); + } +#endif + + /// Destructor destroys internal data + ~StripedSet() + { + free_bucket_table( m_Buckets, m_nBucketMask + 1 ); + } + + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return insert( val, []( value_type& ) {} ); +# else + return insert( val, empty_insert_functor() ); +# endif + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. + + The user-defined functor is called only if the inserting is success and can be passed by reference + using boost::ref + */ + template + bool insert( value_type& val, Func f ) + { + bool bOk; + bool bResize; + size_t nHash = hashing( val ); + bucket_type * pBucket; + { + scoped_cell_lock sl( m_MutexPolicy, nHash ); + pBucket = bucket( nHash ); + bOk = pBucket->insert( val, f ); + bResize = bOk && m_ResizingPolicy( ++m_ItemCounter, *this, *pBucket ); + } + + if ( bResize ) + resize(); + return bOk; + } + + /// Ensures that the \p val exists in the set + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the set, then \p val is inserted into the set. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p ensure function + If new item has been inserted (i.e. 
\p bNew is \p true) then \p item and \p val arguments + refers to the same thing. + + The functor may change non-key fields of the \p item. + + You may pass \p func argument by reference using boost::ref or cds::ref. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the set. + */ + template + std::pair ensure( value_type& val, Func func ) + { + std::pair result; + bool bResize; + size_t nHash = hashing( val ); + bucket_type * pBucket; + { + scoped_cell_lock sl( m_MutexPolicy, nHash ); + pBucket = bucket( nHash ); + + result = pBucket->ensure( val, func ); + bResize = result.first && result.second && m_ResizingPolicy( ++m_ItemCounter, *this, *pBucket ); + } + + if ( bResize ) + resize(); + return result; + } + + /// Unlink the item \p val from the set + /** + The function searches the item \p val in the set and unlink it + if it is found and is equal to \p val (here, the equality means that + \p val belongs to the set: if \p item is an item found then + unlink is successful iif &val == &item) + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + bool bOk; + size_t nHash = hashing( val ); + { + scoped_cell_lock sl( m_MutexPolicy, nHash ); + bOk = bucket( nHash )->unlink( val ); + } + + if ( bOk ) + --m_ItemCounter; + return bOk; + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_StripedSet_erase + The function searches an item with key equal to \p val in the set, + unlinks it from the set, and returns a pointer to unlinked item. + + If the item with key equal to \p val is not found the function return \p NULL. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + value_type * erase( Q const& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase( val, [](value_type const&) {} ); +# else + return erase( val, empty_erase_functor() ); +# endif + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_StripedSet_erase "erase(Q const&)" + but \p pred is used for key comparing + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + value_type * erase_with( Q const& val, Less pred ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return erase_with( val, pred, [](value_type const&) {} ); +# else + return erase_with( val, pred, empty_erase_functor() ); +# endif + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_StripedSet_erase_func + + The function searches an item with key equal to \p val in the set, + call \p f functor with item found, unlinks it from the set, and returns a pointer to unlinked item. + + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + The functor may be passed by reference with boost:ref + + If the item with key equal to \p val is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. 
+ */ + template + value_type * erase( Q const& val, Func f ) + { + size_t nHash = hashing( val ); + value_type * pVal; + { + scoped_cell_lock sl( m_MutexPolicy, nHash ); + pVal = bucket( nHash )->erase( val, f ); + } + + if ( pVal ) + --m_ItemCounter; + return pVal; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_StripedSet_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + value_type * erase_with( Q const& val, Less pred, Func f ) + { + size_t nHash = hashing( val ); + value_type * pVal; + { + scoped_cell_lock sl( m_MutexPolicy, nHash ); + pVal = bucket( nHash )->erase( val, pred, f ); + } + + if ( pVal ) + --m_ItemCounter; + return pVal; + } + + /// Find the key \p val + /** \anchor cds_intrusive_StripedSet_find_func + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return find_( val, f ); + } + + /// Find the key \p val using \p pred predicate + /** + The function is an analog of \ref cds_intrusive_StripedSet_find_func "find(Q&, Func)" + but \p pred is used for key comparing + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Less pred, Func f ) + { + return find_with_( val, pred, f ); + } + + /// Find the key \p val + /** \anchor cds_intrusive_StripedSet_find_cfunc + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + You can pass \p f argument by reference using boost::ref or cds::ref. + + The functor may change non-key fields of \p item. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return find_( val, f ); + } + + /// Find the key \p val using \p pred predicate + /** + The function is an analog of \ref cds_intrusive_StripedSet_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. 
+ */ + template + bool find_with( Q const& val, Less pred, Func f ) + { + return find_with_( val, pred, f ); + } + + /// Find the key \p val + /** \anchor cds_intrusive_StripedSet_find_val + The function searches the item with key equal to \p val + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool find( Q const& val ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return find( val, [](value_type&, Q const& ) {} ); +# else + return find( val, empty_find_functor() ); +# endif + } + + /// Find the key \p val using \p pred predicate + /** + The function is an analog of \ref cds_intrusive_StripedSet_find_val "find(Q const&)" + but \p pred is used for key comparing + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred ) + { +# ifdef CDS_CXX11_LAMBDA_SUPPORT + return find_with( val, pred, [](value_type& , Q const& ) {} ); +# else + return find_with( val, pred, empty_find_functor() ); +# endif + } + + /// Clears the set + /** + The function unlinks all items from the set. + */ + void clear() + { + // locks entire array + scoped_full_lock sl( m_MutexPolicy ); + + size_t nBucketCount = bucket_count(); + bucket_type * pBucket = m_Buckets; + for ( size_t i = 0; i < nBucketCount; ++i, ++pBucket ) + pBucket->clear(); + m_ItemCounter.reset(); + } + + /// Clears the set and calls \p disposer for each item + /** + The function unlinks all items from the set calling \p disposer for each item. + \p Disposer functor interface is: + \code + struct Disposer{ + void operator()( value_type * p ); + }; + \endcode + */ + template + void clear_and_dispose( Disposer disposer ) + { + // locks entire array + scoped_full_lock sl( m_MutexPolicy ); + + size_t nBucketCount = bucket_count(); + bucket_type * pBucket = m_Buckets; + for ( size_t i = 0; i < nBucketCount; ++i, ++pBucket ) + pBucket->clear( disposer ); + m_ItemCounter.reset(); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + size_t size() const + { + return m_ItemCounter; + } + + /// Returns the size of hash table + /** + The hash table size is non-constant and can be increased via resizing. 
+ */ + size_t bucket_count() const + { + return m_nBucketMask + 1; + } + + /// Returns lock array size + size_t lock_count() const + { + return m_MutexPolicy.lock_count(); + } + + /// Returns resizing policy object + resizing_policy& get_resizing_policy() + { + return m_ResizingPolicy; + } + + /// Returns resizing policy (const version) + resizing_policy const& get_resizing_policy() const + { + return m_ResizingPolicy; + } + }; +}} // namespace cds::itrusive + +#endif // #ifndef __CDS_INTRUSIVE_STRIPED_SET_H diff --git a/cds/intrusive/striped_set/adapter.h b/cds/intrusive/striped_set/adapter.h new file mode 100644 index 00000000..8655175a --- /dev/null +++ b/cds/intrusive/striped_set/adapter.h @@ -0,0 +1,371 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_STRIPED_SET_ADAPTER_H +#define __CDS_INTRUSIVE_STRIPED_SET_ADAPTER_H + +#include +#include +#include +#include // cds::opt::details::make_comparator - for some adapt specializations +#include + +namespace cds { namespace intrusive { + + /// StripedSet related definitions + namespace striped_set { + + /// Default adapter for intrusive striped/refinable hash set + /** + By default, the metafunction does not make any transformation for container type \p Container. + \p Container should provide interface suitable for the hash set. + + The \p SetOptions template argument contains option pack + that has been passed to cds::intrusive::StripedSet. + + Bucket interface + + The result of metafunction is a container (a bucket) that should support the following interface: + + Public typedefs that the bucket should provide: + - \p value_type - the type of the item in the bucket + - \p iterator - bucket's item iterator + - \p const_iterator - bucket's item constant iterator + - \p default_resizing_policy - default resizing policy preferable for the container. + By default, the library defines cds::container::striped_set::load_factor_resizing<4> for sequential containers like + boost::intrusive::list, and cds::container::striped_set::no_resizing for ordered container like boost::intrusive::set. + + Insert value \p val of type \p Q + \code template bool insert( value_type& val, Func f ) ; \endcode + Inserts \p val into the container and, if inserting is successful, calls functor \p f + with \p val. + + The functor signature is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item inserted. + + The user-defined functor \p f is called only if the inserting is success. It can be passed by reference + using boost::ref +
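+
+        A minimal sketch of such a functor (\p my_item and its \p nVal field are illustrative only):
+        \code
+        struct init_functor {
+            void operator()( my_item& item )
+            {
+                item.nVal = 0;   // initialize non-key fields of the just-inserted item
+            }
+        };
+        // bucket is an instance of the adapted container
+        bool bOk = bucket.insert( item, init_functor() );
+        \endcode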
+
+        Ensures that the \p item exists in the container
+        \code template <typename Func> std::pair<bool, bool> ensure( value_type& val, Func f ) \endcode
+        The operation performs inserting or changing data.
+
+        If the \p val key is not found in the container, then \p val is inserted.
+        Otherwise, the functor \p f is called with the item found.
+
+        The \p Func functor has the following interface:
+        \code
+        void func( bool bNew, value_type& item, value_type& val );
+        \endcode
+        or like a functor:
+        \code
+        struct functor {
+            void operator()( bool bNew, value_type& item, value_type& val );
+        };
+        \endcode
+
+        where arguments are:
+        - \p bNew - \p true if the item has been inserted, \p false otherwise
+        - \p item - container's item
+        - \p val - argument \p val passed into the \p ensure function
+
+        If \p val has been inserted (i.e. \p bNew is \p true) then \p item and \p val
+        are the same element: &item == &val. Otherwise, they are different.
+
+        The functor can change non-key fields of the \p item.
+
+        You can pass \p f argument by reference using \p boost::ref.
+
+        Returns std::pair<bool, bool> where \p first is \p true if the operation is successful,
+        \p second is \p true if a new item has been added or \p false if the item with \p val key
+        already exists.
+
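+
+        A possible functor for \p ensure might look like this sketch (\p my_item and \p nVal are illustrative):
+        \code
+        struct ensure_functor {
+            void operator()( bool bNew, my_item& item, my_item& val )
+            {
+                if ( !bNew )
+                    item.nVal = val.nVal;   // update non-key data of the already stored item
+                // if bNew is true, item and val are the same object - nothing to copy
+            }
+        };
+        // bucket is an instance of the adapted container
+        std::pair<bool, bool> res = bucket.ensure( val, ensure_functor() );
+        \endcode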
+
+        Unlink an item
+        \code bool unlink( value_type& val ) \endcode
+        Unlinks \p val from the container if \p val itself belongs to it, i.e. if the stored item and \p val are the same object.
+
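+
+        Note that \p unlink checks object identity, not key equality; a sketch
+        (\p my_item is illustrative, both objects carry the same key):
+        \code
+        my_item a, b;
+        a.nKey = b.nKey = 10;
+        bucket.insert( a, dummy_functor() );   // dummy_functor does nothing
+        bucket.unlink( b );    // returns false: b is not the object stored in the bucket
+        bucket.unlink( a );    // returns true: a itself is linked into the bucket
+        \endcode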
+
+        Erase \p key
+        \code template <typename Q, typename Func> bool erase( Q const& key, Func f ) \endcode
+        The function searches an item with key \p key, calls the \p f functor
+        and erases the item. If \p key is not found, the functor is not called.
+
+        The functor \p Func interface is:
+        \code
+        struct functor {
+            void operator()( value_type& val );
+        };
+        \endcode
+        The functor can be passed by reference using \p boost::ref
+
+        The type \p Q can differ from \ref value_type of items stored in the container.
+        Therefore, the \p value_type should be comparable with type \p Q.
+
+        Returns \p true if the key is found and deleted, \p false otherwise
+
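+
+        For example, the key may be of a type different from \p value_type
+        (sketch; \p my_item and the \p int key are illustrative):
+        \code
+        struct erase_functor {
+            void operator()( my_item& val )
+            {
+                val.nVal = 0;   // last chance to touch the item before it is unlinked
+            }
+        };
+        // bucket is an instance of the adapted container
+        bucket.erase( 10, erase_functor() );   // 10 is an int key, not a my_item
+        \endcode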
+
+
+        Find the key \p val
+        \code
+        template <typename Q, typename Func> bool find( Q& val, Func f )
+        template <typename Q, typename Compare, typename Func> bool find( Q& val, Compare cmp, Func f )
+        \endcode
+        The function searches the item with key equal to \p val and calls the functor \p f for the item found.
+        The interface of \p Func functor is:
+        \code
+        struct functor {
+            void operator()( value_type& item, Q& val );
+        };
+        \endcode
+        where \p item is the item found, \p val is the find function argument.
+
+        You can pass \p f argument by reference using \p boost::ref or \p cds::ref.
+
+        The functor can change non-key fields of \p item.
+        The \p val argument may be non-const since it can be used as \p f functor destination, i.e. the functor
+        can modify both arguments.
+
+        The type \p Q can differ from \ref value_type of items stored in the container.
+        Therefore, the \p value_type should be comparable with type \p Q.
+
+        The first form uses the default \p compare functor for key ordering.
+        The second form allows to specify a \p Compare functor \p cmp
+        that can compare \p value_type and \p Q types. The interface of \p Compare is the same as \p std::less.
+
+        The function returns \p true if \p val is found, \p false otherwise.
+
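+
+        A sketch of a find functor (\p my_item and its fields are illustrative):
+        \code
+        struct find_functor {
+            void operator()( my_item& item, int& key )
+            {
+                item.nAccessCount += 1;   // update non-key fields of the found item
+            }
+        };
+        // bucket is an instance of the adapted container
+        int key = 10;
+        bool bFound = bucket.find( key, find_functor() );
+        \endcode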
+
+        Clears the container
+        \code
+        void clear()
+        template <typename Disposer> void clear( Disposer disposer )
+        \endcode
+        The second form calls \p disposer for each item in the container before clearing.
+
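+
+        A typical \p Disposer for items allocated on the heap (sketch; \p my_item is illustrative):
+        \code
+        struct my_disposer {
+            void operator()( my_item * p )
+            {
+                delete p;   // safe here: the item has already been unlinked from the bucket
+            }
+        };
+        // bucket is an instance of the adapted container
+        bucket.clear( my_disposer() );
+        \endcode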
+
+        Get the size of the bucket
+        \code size_t size() const \endcode
+        This function may be required by some resizing policies.
+
+ + Iterators + \code + iterator begin(); + const_iterator begin() const; + iterator end(); + const_iterator end() const; + \endcode +
+
+        Move an item when resizing
+        \code void move_item( adapted_container& from, iterator it ) \endcode
+        This helper function is used during set resizing: the item
+        pointed to by the \p it iterator is moved from the old bucket \p from to the new bucket
+        pointed to by \p this.
+
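+
+        A typical implementation, as the adapters in this header set do, simply relinks the node (sketch):
+        \code
+        void move_item( adapted_container& from, iterator itWhat )
+        {
+            value_type& val = *itWhat;
+            from.base_container().erase( itWhat );   // unlink from the old bucket
+            insert( val, dummy_functor() );          // link into this bucket; dummy_functor does nothing
+        }
+        \endcode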
+ + */ + template < typename Container, CDS_DECL_OPTIONS > + class adapt + { + public: + typedef Container type ; ///< adapted container type + typedef typename type::value_type value_type ; ///< value type stored in the container + }; + + //@cond + struct adapted_sequential_container + { + typedef striped_set::load_factor_resizing<4> default_resizing_policy; + }; + + struct adapted_container + { + typedef striped_set::no_resizing default_resizing_policy; + }; + //@endcond + + //@cond + namespace details { + template + class boost_intrusive_set_adapter: public cds::intrusive::striped_set::adapted_container + { + public: + typedef Set container_type; + + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + typedef typename container_type::value_compare key_comparator; + + private: +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_insert_functor { + void operator()( value_type& ) + {} + }; +# endif + + container_type m_Set; + + public: + boost_intrusive_set_adapter() + {} + + container_type& base_container() + { + return m_Set; + } + + template + bool insert( value_type& val, Func f ) + { + std::pair res = m_Set.insert( val ); + if ( res.second ) + cds::unref(f)( val ); + return res.second; + } + + template + std::pair ensure( value_type& val, Func f ) + { + std::pair res = m_Set.insert( val ); + cds::unref(f)( res.second, *res.first, val ); + return std::make_pair( true, res.second ); + } + + bool unlink( value_type& val ) + { + iterator it = m_Set.find( value_type(val) ); + if ( it == m_Set.end() || &(*it) != &val ) + return false; + m_Set.erase( it ); + return true; + } + + template + value_type * erase( Q const& key, Func f ) + { + iterator it = m_Set.find( key, key_comparator() ); + if ( it == m_Set.end() ) + return null_ptr(); + value_type& val = *it; + cds::unref(f)( val ); + m_Set.erase( it ); + return &val; + } + + template + value_type * erase( Q const& key, Less pred, Func f ) + { + iterator it = m_Set.find( key, pred ); + if ( it == m_Set.end() ) + return null_ptr(); + value_type& val = *it; + cds::unref(f)( val ); + m_Set.erase( it ); + return &val; + } + + template + bool find( Q& key, Func f ) + { + return find( key, key_comparator(), f ); + } + + template + bool find( Q& key, Compare cmp, Func f ) + { + iterator it = m_Set.find( key, cmp ); + if ( it == m_Set.end() ) + return false; + cds::unref(f)( *it, key ); + return true; + } + + void clear() + { + m_Set.clear(); + } + + template + void clear( Disposer disposer ) + { + m_Set.clear_and_dispose( disposer ); + } + + iterator begin() { return m_Set.begin(); } + const_iterator begin() const { return m_Set.begin(); } + iterator end() { return m_Set.end(); } + const_iterator end() const { return m_Set.end(); } + + size_t size() const + { + return (size_t) m_Set.size(); + } + + void move_item( boost_intrusive_set_adapter& from, iterator itWhat ) + { + value_type& val = *itWhat; + from.base_container().erase( itWhat ); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + insert( val, []( value_type& ) {} ); +# else + insert( val, empty_insert_functor() ); +# endif + } + }; + } // namespace details + //@endcond + + } // namespace striped_set +}} // namespace cds::intrusive + +//@cond +#if defined(CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT) && defined(BOOST_INTRUSIVE_VARIADIC_TEMPLATES) +# define CDS_BOOST_INTRUSIVE_DECL_OPTIONS3 
typename... BIOptions +# define CDS_BOOST_INTRUSIVE_DECL_OPTIONS4 typename... BIOptions +# define CDS_BOOST_INTRUSIVE_DECL_OPTIONS5 typename... BIOptions +# define CDS_BOOST_INTRUSIVE_DECL_OPTIONS6 typename... BIOptions +# define CDS_BOOST_INTRUSIVE_DECL_OPTIONS7 typename... BIOptions +# define CDS_BOOST_INTRUSIVE_DECL_OPTIONS8 typename... BIOptions +# define CDS_BOOST_INTRUSIVE_DECL_OPTIONS9 typename... BIOptions +# define CDS_BOOST_INTRUSIVE_DECL_OPTIONS10 typename... BIOptions + +# define CDS_BOOST_INTRUSIVE_OPTIONS3 BIOptions... +# define CDS_BOOST_INTRUSIVE_OPTIONS4 BIOptions... +# define CDS_BOOST_INTRUSIVE_OPTIONS5 BIOptions... +# define CDS_BOOST_INTRUSIVE_OPTIONS6 BIOptions... +# define CDS_BOOST_INTRUSIVE_OPTIONS7 BIOptions... +# define CDS_BOOST_INTRUSIVE_OPTIONS8 BIOptions... +# define CDS_BOOST_INTRUSIVE_OPTIONS9 BIOptions... +# define CDS_BOOST_INTRUSIVE_OPTIONS10 BIOptions... +#else +# define CDS_BOOST_INTRUSIVE_DECL_OPTIONS3 typename BIO1, typename BIO2, typename BIO3 +# define CDS_BOOST_INTRUSIVE_DECL_OPTIONS4 CDS_BOOST_INTRUSIVE_DECL_OPTIONS3, typename BIO4 +# define CDS_BOOST_INTRUSIVE_DECL_OPTIONS5 CDS_BOOST_INTRUSIVE_DECL_OPTIONS4, typename BIO5 +# define CDS_BOOST_INTRUSIVE_DECL_OPTIONS6 CDS_BOOST_INTRUSIVE_DECL_OPTIONS5, typename BIO6 +# define CDS_BOOST_INTRUSIVE_DECL_OPTIONS7 CDS_BOOST_INTRUSIVE_DECL_OPTIONS6, typename BIO7 +# define CDS_BOOST_INTRUSIVE_DECL_OPTIONS8 CDS_BOOST_INTRUSIVE_DECL_OPTIONS7, typename BIO8 +# define CDS_BOOST_INTRUSIVE_DECL_OPTIONS9 CDS_BOOST_INTRUSIVE_DECL_OPTIONS8, typename BIO9 +# define CDS_BOOST_INTRUSIVE_DECL_OPTIONS10 CDS_BOOST_INTRUSIVE_DECL_OPTIONS9, typename BIO10 + +# define CDS_BOOST_INTRUSIVE_OPTIONS3 BIO1,BIO2,BIO3 +# define CDS_BOOST_INTRUSIVE_OPTIONS4 CDS_BOOST_INTRUSIVE_OPTIONS3, BIO4 +# define CDS_BOOST_INTRUSIVE_OPTIONS5 CDS_BOOST_INTRUSIVE_OPTIONS4, BIO5 +# define CDS_BOOST_INTRUSIVE_OPTIONS6 CDS_BOOST_INTRUSIVE_OPTIONS5, BIO6 +# define CDS_BOOST_INTRUSIVE_OPTIONS7 CDS_BOOST_INTRUSIVE_OPTIONS6, BIO7 +# define CDS_BOOST_INTRUSIVE_OPTIONS8 CDS_BOOST_INTRUSIVE_OPTIONS7, BIO8 +# define CDS_BOOST_INTRUSIVE_OPTIONS9 CDS_BOOST_INTRUSIVE_OPTIONS8, BIO9 +# define CDS_BOOST_INTRUSIVE_OPTIONS10 CDS_BOOST_INTRUSIVE_OPTIONS9, BIO10 +#endif +//@endcond + +#endif // #ifndef __CDS_INTRUSIVE_STRIPED_SET_ADAPTER_H diff --git a/cds/intrusive/striped_set/boost_avl_set.h b/cds/intrusive/striped_set/boost_avl_set.h new file mode 100644 index 00000000..70e1ed51 --- /dev/null +++ b/cds/intrusive/striped_set/boost_avl_set.h @@ -0,0 +1,25 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_STRIPED_SET_BOOST_AVL_SET_ADAPTER_H +#define __CDS_INTRUSIVE_STRIPED_SET_BOOST_AVL_SET_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace intrusive { namespace striped_set { + + template + class adapt< boost::intrusive::avl_set< T, CDS_BOOST_INTRUSIVE_OPTIONS4 >, CDS_OPTIONS > + { + public: + typedef boost::intrusive::avl_set< T, CDS_BOOST_INTRUSIVE_OPTIONS4 > container_type ; ///< underlying intrusive container type + + public: + typedef details::boost_intrusive_set_adapter type ; ///< Result of the metafunction + + }; +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef __CDS_INTRUSIVE_STRIPED_SET_BOOST_AVL_SET_ADAPTER_H diff --git a/cds/intrusive/striped_set/boost_list.h b/cds/intrusive/striped_set/boost_list.h new file mode 100644 index 00000000..c5744b72 --- /dev/null +++ b/cds/intrusive/striped_set/boost_list.h @@ -0,0 +1,209 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_STRIPED_SET_BOOST_LIST_ADAPTER_H +#define 
__CDS_INTRUSIVE_STRIPED_SET_BOOST_LIST_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace intrusive { namespace striped_set { + + template + class adapt< boost::intrusive::list< T, CDS_BOOST_INTRUSIVE_OPTIONS3 >, CDS_OPTIONS > + { + public: + typedef boost::intrusive::list< T, CDS_BOOST_INTRUSIVE_OPTIONS3 > container_type ; ///< underlying intrusive container type + + private: + /// Adapted intrusive container + class adapted_container: public cds::intrusive::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, CDS_OPTIONS >::type key_comparator; + + private: + struct find_predicate + { + bool operator()( value_type const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( Q const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( value_type const& i1, Q const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + }; + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_insert_functor { + void operator()( value_type& ) + {} + }; +# endif + + template + iterator find_key( Q const& key, Pred pred) + { + iterator itEnd = m_List.end(); + iterator it; + for ( it = m_List.begin(); it != itEnd; ++it ) { + if ( !pred( *it, key ) ) + break; + } + return it; + } + + private: + container_type m_List; + + public: + adapted_container() + {} + + container_type& base_container() + { + return m_List; + } + + template + bool insert( value_type& val, Func f ) + { + iterator it = find_key( val, find_predicate() ); + if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { + m_List.insert( it, val ); + cds::unref( f )( val ); + + return true; + } + + // key already exists + return false; + } + + template + std::pair ensure( value_type& val, Func f ) + { + iterator it = find_key( val, find_predicate() ); + if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { + // insert new + m_List.insert( it, val ); + cds::unref( f )( true, val, val ); + return std::make_pair( true, true ); + } + else { + // already exists + cds::unref( f )( false, *it, val ); + return std::make_pair( true, false ); + } + } + + bool unlink( value_type& val ) + { + iterator it = find_key( val, find_predicate() ); + if ( it == m_List.end() || &(*it) != &val ) + return false; + + m_List.erase( it ); + return true; + } + + template + value_type * erase( Q const& key, Func f ) + { + iterator it = find_key( key, find_predicate() ); + if ( it == m_List.end() || key_comparator()( key, *it ) != 0 ) + return null_ptr(); + + // key exists + value_type& val = *it; + cds::unref( f )( val ); + m_List.erase( it ); + + return &val; + } + + template + value_type * erase( Q const& key, Less pred, Func f ) + { + iterator it = find_key( key, pred ); + if ( it == m_List.end() || pred( key, *it ) || pred( *it, key ) ) + return null_ptr(); + + // key exists + value_type& val = *it; + cds::unref( f )( val ); + m_List.erase( it ); + + return &val; + } + + template + bool find( Q& key, Func f ) + { + return find( key, find_predicate(), f ); + } + + template + bool find( Q& key, Less pred, Func f ) + { + iterator it = find_key( key, pred ); + if ( 
it == m_List.end() || pred( key, *it ) || pred( *it, key )) + return false; + + // key exists + cds::unref( f )( *it, key ); + return true; + } + + void clear() + { + m_List.clear(); + } + + template + void clear( Disposer disposer ) + { + m_List.clear_and_dispose( disposer ); + } + + iterator begin() { return m_List.begin(); } + const_iterator begin() const { return m_List.begin(); } + iterator end() { return m_List.end(); } + const_iterator end() const { return m_List.end(); } + + size_t size() const + { + return (size_t) m_List.size(); + } + + void move_item( adapted_container& from, iterator itWhat ) + { + value_type& val = *itWhat; + from.base_container().erase( itWhat ); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + insert( val, []( value_type& ) {} ); +# else + insert( val, empty_insert_functor() ); +# endif + } + + }; + public: + typedef adapted_container type ; ///< Result of the metafunction + }; +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef __CDS_INTRUSIVE_STRIPED_SET_BOOST_LIST_ADAPTER_H diff --git a/cds/intrusive/striped_set/boost_set.h b/cds/intrusive/striped_set/boost_set.h new file mode 100644 index 00000000..c8fcadfd --- /dev/null +++ b/cds/intrusive/striped_set/boost_set.h @@ -0,0 +1,25 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_STRIPED_SET_BOOST_SET_ADAPTER_H +#define __CDS_INTRUSIVE_STRIPED_SET_BOOST_SET_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace intrusive { namespace striped_set { + + template + class adapt< boost::intrusive::set< T, CDS_BOOST_INTRUSIVE_OPTIONS4 >, CDS_OPTIONS > + { + public: + typedef boost::intrusive::set< T, CDS_BOOST_INTRUSIVE_OPTIONS4 > container_type ; ///< underlying intrusive container type + + public: + typedef details::boost_intrusive_set_adapter type ; ///< Result of the metafunction + + }; +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef __CDS_INTRUSIVE_STRIPED_SET_BOOST_SET_ADAPTER_H diff --git a/cds/intrusive/striped_set/boost_sg_set.h b/cds/intrusive/striped_set/boost_sg_set.h new file mode 100644 index 00000000..d0445987 --- /dev/null +++ b/cds/intrusive/striped_set/boost_sg_set.h @@ -0,0 +1,25 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_STRIPED_SET_BOOST_SG_SET_ADAPTER_H +#define __CDS_INTRUSIVE_STRIPED_SET_BOOST_SG_SET_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace intrusive { namespace striped_set { + + template + class adapt< boost::intrusive::sg_set< T, CDS_BOOST_INTRUSIVE_OPTIONS4 >, CDS_OPTIONS > + { + public: + typedef boost::intrusive::sg_set< T, CDS_BOOST_INTRUSIVE_OPTIONS4 > container_type ; ///< underlying intrusive container type + + public: + typedef details::boost_intrusive_set_adapter type ; ///< Result of the metafunction + + }; +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef __CDS_INTRUSIVE_STRIPED_SET_BOOST_SG_SET_ADAPTER_H diff --git a/cds/intrusive/striped_set/boost_slist.h b/cds/intrusive/striped_set/boost_slist.h new file mode 100644 index 00000000..0d861167 --- /dev/null +++ b/cds/intrusive/striped_set/boost_slist.h @@ -0,0 +1,226 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_STRIPED_SET_BOOST_SLIST_ADAPTER_H +#define __CDS_INTRUSIVE_STRIPED_SET_BOOST_SLIST_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace intrusive { namespace striped_set { + + template + class adapt< boost::intrusive::slist< T, CDS_BOOST_INTRUSIVE_OPTIONS5 >, CDS_OPTIONS > + { + public: + typedef boost::intrusive::slist< T, CDS_BOOST_INTRUSIVE_OPTIONS5 > container_type ; ///< 
underlying intrusive container type + + private: + /// Adapted intrusive container + class adapted_container: public cds::intrusive::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, CDS_OPTIONS >::type key_comparator; + + private: + + template + std::pair< iterator, bool > find_prev_item( Q const& key, Less pred ) + { + iterator itPrev = m_List.before_begin(); + iterator itEnd = m_List.end(); + for ( iterator it = m_List.begin(); it != itEnd; ++it ) { + if ( pred( key, *it ) ) + itPrev = it; + else if ( pred( *it, key ) ) + break; + else + return std::make_pair( itPrev, true ); + } + return std::make_pair( itPrev, false ); + } + + template + std::pair< iterator, bool > find_prev_item( Q const& key ) + { + return find_prev_item_cmp( key, key_comparator() ); + } + + template + std::pair< iterator, bool > find_prev_item_cmp( Q const& key, Compare cmp ) + { + iterator itPrev = m_List.before_begin(); + iterator itEnd = m_List.end(); + for ( iterator it = m_List.begin(); it != itEnd; ++it ) { + int nCmp = cmp( key, *it ); + if ( nCmp < 0 ) + itPrev = it; + else if ( nCmp > 0 ) + break; + else + return std::make_pair( itPrev, true ); + } + return std::make_pair( itPrev, false ); + } + + template + value_type * erase_( Q const& key, Compare cmp, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item_cmp( key, cmp ); + if ( !pos.second ) + return null_ptr(); + + // key exists + iterator it = pos.first; + value_type& val = *(++it); + cds::unref( f )( val ); + m_List.erase_after( pos.first ); + + return &val; + } + + +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_insert_functor { + void operator()( value_type& ) + {} + }; +# endif + + private: + container_type m_List; + + public: + adapted_container() + {} + + container_type& base_container() + { + return m_List; + } + + template + bool insert( value_type& val, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( val ); + if ( !pos.second ) { + m_List.insert_after( pos.first, val ); + cds::unref( f )( val ); + return true; + } + + // key already exists + return false; + } + + template + std::pair ensure( value_type& val, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( val ); + if ( !pos.second ) { + // insert new + m_List.insert_after( pos.first, val ); + cds::unref( f )( true, val, val ); + return std::make_pair( true, true ); + } + else { + // already exists + cds::unref( f )( false, *(++pos.first), val ); + return std::make_pair( true, false ); + } + } + + bool unlink( value_type& val ) + { + std::pair< iterator, bool > pos = find_prev_item( val ); + if ( !pos.second ) + return false; + + ++pos.first; + if ( &(*pos.first) != &val ) + return false; + + m_List.erase( pos.first ); + return true; + } + + template + value_type * erase( Q const& key, Func f ) + { + return erase_( key, key_comparator(), f ); + } + + template + value_type * erase( Q const& key, Less pred, Func f ) + { + return erase_( key, cds::opt::details::make_comparator_from_less(), f ); + } + + template + bool find( Q& key, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( key ); + if ( !pos.second ) + return false; + + // key exists + cds::unref( f )( *(++pos.first), 
key ); + return true; + } + + template + bool find( Q& key, Less pred, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( key, pred ); + if ( !pos.second ) + return false; + + // key exists + cds::unref( f )( *(++pos.first), key ); + return true; + } + + void clear() + { + m_List.clear(); + } + + template + void clear( Disposer disposer ) + { + m_List.clear_and_dispose( disposer ); + } + + iterator begin() { return m_List.begin(); } + const_iterator begin() const { return m_List.begin(); } + iterator end() { return m_List.end(); } + const_iterator end() const { return m_List.end(); } + + size_t size() const + { + return (size_t) m_List.size(); + } + + void move_item( adapted_container& from, iterator itWhat ) + { + value_type& val = *itWhat; + from.base_container().erase( itWhat ); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + insert( val, []( value_type& ) {} ); +# else + insert( val, empty_insert_functor() ); +# endif + } + + }; + public: + typedef adapted_container type ; ///< Result of the metafunction + }; +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef __CDS_INTRUSIVE_STRIPED_SET_BOOST_SLIST_ADAPTER_H diff --git a/cds/intrusive/striped_set/boost_splay_set.h b/cds/intrusive/striped_set/boost_splay_set.h new file mode 100644 index 00000000..d993317e --- /dev/null +++ b/cds/intrusive/striped_set/boost_splay_set.h @@ -0,0 +1,25 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_STRIPED_SET_BOOST_SPLAY_SET_ADAPTER_H +#define __CDS_INTRUSIVE_STRIPED_SET_BOOST_SPLAY_SET_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace intrusive { namespace striped_set { + + template + class adapt< boost::intrusive::splay_set< T, CDS_BOOST_INTRUSIVE_OPTIONS4 >, CDS_OPTIONS > + { + public: + typedef boost::intrusive::splay_set< T, CDS_BOOST_INTRUSIVE_OPTIONS4 > container_type ; ///< underlying intrusive container type + + public: + typedef details::boost_intrusive_set_adapter type ; ///< Result of the metafunction + + }; +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef __CDS_INTRUSIVE_STRIPED_SET_BOOST_SPLAY_SET_ADAPTER_H diff --git a/cds/intrusive/striped_set/boost_treap_set.h b/cds/intrusive/striped_set/boost_treap_set.h new file mode 100644 index 00000000..0e0cc2b1 --- /dev/null +++ b/cds/intrusive/striped_set/boost_treap_set.h @@ -0,0 +1,25 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_STRIPED_SET_BOOST_TREAP_SET_ADAPTER_H +#define __CDS_INTRUSIVE_STRIPED_SET_BOOST_TREAP_SET_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace intrusive { namespace striped_set { + + template + class adapt< boost::intrusive::treap_set< T, CDS_BOOST_INTRUSIVE_OPTIONS4 >, CDS_OPTIONS > + { + public: + typedef boost::intrusive::treap_set< T, CDS_BOOST_INTRUSIVE_OPTIONS4 > container_type ; ///< underlying intrusive container type + + public: + typedef details::boost_intrusive_set_adapter type ; ///< Result of the metafunction + + }; +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef __CDS_INTRUSIVE_STRIPED_SET_BOOST_TREAP_SET_ADAPTER_H diff --git a/cds/intrusive/striped_set/boost_unordered_set.h b/cds/intrusive/striped_set/boost_unordered_set.h new file mode 100644 index 00000000..d7d46a67 --- /dev/null +++ b/cds/intrusive/striped_set/boost_unordered_set.h @@ -0,0 +1,192 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_STRIPED_SET_BOOST_UNORDERED_SET_ADAPTER_H +#define __CDS_INTRUSIVE_STRIPED_SET_BOOST_UNORDERED_SET_ADAPTER_H + +#include +#include +#include + +//@cond +namespace cds { namespace 
intrusive { namespace striped_set { + + template + class adapt< boost::intrusive::unordered_set< T, CDS_BOOST_INTRUSIVE_OPTIONS10 >, CDS_OPTIONS > + { + public: + typedef boost::intrusive::unordered_set< T, CDS_BOOST_INTRUSIVE_OPTIONS10 > container_type ; ///< underlying intrusive container type + + private: + class adapted_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + typedef typename opt::value< + typename opt::find_option< + opt::buffer< opt::v::static_buffer< cds::any_type, 256 > >, + CDS_OPTIONS + >::type + >::buffer initial_buffer_type; + typedef typename initial_buffer_type::template rebind< typename container_type::bucket_type >::other buffer_type; + typedef cds::intrusive::striped_set::load_factor_resizing<256> default_resizing_policy; + + private: +# ifndef CDS_CXX11_LAMBDA_SUPPORT + struct empty_insert_functor { + void operator()( value_type& ) + {} + }; +# endif + + template + struct equal_from_compare + { + Compare& m_cmp; + equal_from_compare( Compare& cmp ) + : m_cmp( cmp ) + {} + + equal_from_compare( equal_from_compare const& src ) + : m_cmp( src.m_cmp ) + {} + + template + bool operator()( A& a, B& b ) const + { + return !m_cmp( a, b ) && !m_cmp( b, a ); + } + + template + bool operator()( A& a, B& b ) + { + return !m_cmp( a, b ) && !m_cmp( b, a ); + } + }; + + buffer_type m_Buckets ; // buffer should be declared first since it is used in m_Set ctor. + container_type m_Set; + + public: + adapted_container() + : m_Set( typename container_type::bucket_traits( m_Buckets.buffer(), m_Buckets.capacity() )) + {} + + container_type& base_container() + { + return m_Set; + } + + template + bool insert( value_type& val, Func f ) + { + std::pair res = m_Set.insert( val ); + if ( res.second ) + cds::unref(f)( val ); + return res.second; + } + + template + std::pair ensure( value_type& val, Func f ) + { + std::pair res = m_Set.insert( val ); + cds::unref(f)( res.second, *res.first, val ); + return std::make_pair( true, res.second ); + } + + bool unlink( value_type& val ) + { + iterator it = m_Set.find( value_type(val) ); + if ( it == m_Set.end() || &(*it) != &val ) + return false; + m_Set.erase( it ); + return true; + } + + template + value_type * erase( Q const& key, Func f ) + { + iterator it = m_Set.find( key, typename container_type::hasher(), typename container_type::key_equal() ); + if ( it == m_Set.end() ) + return null_ptr(); + value_type& val = *it; + cds::unref(f)( val ); + m_Set.erase( it ); + return &val; + } + + template + value_type * erase( Q const& key, Less pred, Func f ) + { + iterator it = m_Set.find( key, typename container_type::hasher(), equal_from_compare(pred) ); + if ( it == m_Set.end() ) + return null_ptr(); + value_type& val = *it; + cds::unref(f)( val ); + m_Set.erase( it ); + return &val; + } + + template + bool find( Q& key, Func f ) + { + iterator it = m_Set.find( key, typename container_type::hasher(), typename container_type::key_equal() ); + if ( it == m_Set.end() ) + return false; + cds::unref(f)( *it, key ); + return true; + } + + template + bool find( Q& key, Less pred, Func f ) + { + iterator it = m_Set.find( key, typename container_type::hasher(), equal_from_compare(pred) ); + if ( it == m_Set.end() ) + return false; + cds::unref(f)( *it, key ); + return true; + } + + void clear() + { + 
m_Set.clear(); + } + + template + void clear( Disposer disposer ) + { + m_Set.clear_and_dispose( disposer ); + } + + iterator begin() { return m_Set.begin(); } + const_iterator begin() const { return m_Set.begin(); } + iterator end() { return m_Set.end(); } + const_iterator end() const { return m_Set.end(); } + + size_t size() const + { + return (size_t) m_Set.size(); + } + + void move_item( adapted_container& from, iterator itWhat ) + { + value_type& val = *itWhat; + from.base_container().erase( itWhat ); +# ifdef CDS_CXX11_LAMBDA_SUPPORT + insert( val, []( value_type& ) {} ); +# else + insert( val, empty_insert_functor() ); +# endif + } + }; + + public: + typedef adapted_container type ; ///< Result of the metafunction + + }; +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef __CDS_INTRUSIVE_STRIPED_SET_BOOST_UNORDERED_SET_ADAPTER_H diff --git a/cds/intrusive/striped_set/resizing_policy.h b/cds/intrusive/striped_set/resizing_policy.h new file mode 100644 index 00000000..2ffdc33e --- /dev/null +++ b/cds/intrusive/striped_set/resizing_policy.h @@ -0,0 +1,189 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_STRIPED_SET_RESIZING_POLICY_H +#define __CDS_INTRUSIVE_STRIPED_SET_RESIZING_POLICY_H + +#include + +namespace cds { namespace intrusive { namespace striped_set { + + /// Load factor based resizing policy + /** + When total item count in a container exceeds + container.bucket_count() * LoadFactor + then resizing is needed. + + This policy is stateless. + + The reset() function is called after the resizing is done. + The function is intended for resetting internal state of the policy. + */ + template + struct load_factor_resizing + { + /// Main policy operator returns \p true when resizing is needed + template + bool operator ()( + size_t nSize, ///< Current item count of \p container + Container const& container, ///< Container + Bucket const& /*bucket*/ ///< reference to a container's bucket (not used) + ) const + { + return nSize > container.bucket_count() * LoadFactor; + } + + /// Resets internal state of the policy (does nothing) + void reset() + {} + }; + + /// Load factor based resizing policy, stateful specialization + /** + This specialization allows to specify a load factor at runtime. + */ + template <> + struct load_factor_resizing<0> + { + ///@cond + const size_t m_nLoadFactor; + //@endcond + public: + /// Default ctor, load factor is 4 + load_factor_resizing() + : m_nLoadFactor(4) + {} + + /// Ctor with explicitly defined \p nLoadFactor + explicit load_factor_resizing( size_t nLoadFactor ) + : m_nLoadFactor( nLoadFactor ) + {} + + /// Copy ctor + load_factor_resizing( load_factor_resizing const& src ) + : m_nLoadFactor( src.m_nLoadFactor ) + {} + +# ifdef CDS_RVALUE_SUPPORT + /// Move ctor (for the compilers supporting rvalue reference) + load_factor_resizing( load_factor_resizing&& src ) + : m_nLoadFactor( src.m_nLoadFactor ) + {} +# endif + + /// Main policy operator returns \p true when resizing is needed + template + bool operator ()( + size_t nSize, ///< Current item count of \p container + Container const& container, ///< Container + Bucket const& /*bucket*/ ///< reference to a container's bucket (not used) + ) + { + return nSize > container.bucket_count() * m_nLoadFactor; + } + + /// Resets internal state of the policy (does nothing) + void reset() + {} + }; + + + /// Single bucket threshold resizing policy + /** + If any single bucket size exceeds the global \p Threshold then resizing is needed. + + This policy is stateless. 
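+
+        Usage sketch (the item type, hash and ordering functors are illustrative):
+        \code
+        typedef cds::intrusive::StripedSet<
+            boost::intrusive::list< my_item, boost::intrusive::constant_time_size<true> >
+            ,cds::opt::hash< my_hash >
+            ,cds::opt::less< my_less >
+            ,cds::opt::resizing_policy< cds::intrusive::striped_set::single_bucket_size_threshold<8> >
+        > my_set;
+        \endcode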
+ */ + template + struct single_bucket_size_threshold + { + /// Main policy operator returns \p true when resizing is needed + template + bool operator ()( + size_t /*nSize*/, ///< Current item count of \p container (not used) + Container const& /*container*/, ///< Container (not used) + Bucket const& bucket ///< reference to a container's bucket + ) const + { + return bucket.size() > Threshold; + } + + /// Resets internal state of the policy (does nothing) + void reset() + {} + }; + + + /// Single bucket threshold resizing policy, stateful specialization + /** + This specialization allows to specify and modify a threshold at runtime. + */ + template <> + struct single_bucket_size_threshold<0> + { + size_t m_nThreshold ; ///< The bucket size threshold + + /// Default ctor, the threshold is 4 + single_bucket_size_threshold() + : m_nThreshold(4) + {} + + /// Ctor with explicitly defined \p nThreshold + explicit single_bucket_size_threshold( size_t nThreshold ) + : m_nThreshold( nThreshold ) + {} + + /// Copy ctor + single_bucket_size_threshold( single_bucket_size_threshold const& src ) + : m_nThreshold( src.m_nThreshold ) + {} + +# ifdef CDS_RVALUE_SUPPORT + /// Move ctor (for the compilers supporting rvalue reference) + single_bucket_size_threshold( single_bucket_size_threshold&& src ) + : m_nThreshold( src.m_nThreshold ) + {} +# endif + + /// Main policy operator returns \p true when resizing is needed + template + bool operator ()( + size_t /*nSize*/, ///< Current item count of \p container (not used) + Container const& /*container*/, ///< Container (not used) + Bucket const& bucket ///< reference to a container's bucket + ) const + { + return bucket.size() > m_nThreshold; + } + + /// Resets internal state of the policy (does nothing) + void reset() + {} + }; + + /// Dummy resizing policy + /** + This policy is dummy and always returns \p false that means no resizing is needed. + + This policy is stateless. + */ + struct no_resizing + { + /// Main policy operator always returns \p false + template + bool operator ()( + size_t /*nSize*/, ///< Current item count of \p container (not used) + Container const& /*container*/, ///< Container (not used) + Bucket const& /*bucket*/ ///< reference to a container's bucket (not used) + ) const + { + return false; + } + + /// Resets internal state of the policy (does nothing) + void reset() + {} + }; + +}}} // namespace cds::intrusive::striped_set + +#endif // #define __CDS_INTRUSIVE_STRIPED_SET_RESIZING_POLICY_H diff --git a/cds/intrusive/striped_set/striping_policy.h b/cds/intrusive/striped_set/striping_policy.h new file mode 100644 index 00000000..881d2cf4 --- /dev/null +++ b/cds/intrusive/striped_set/striping_policy.h @@ -0,0 +1,360 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_STRIPED_SET_STRIPING_POLICY_H +#define __CDS_INTRUSIVE_STRIPED_SET_STRIPING_POLICY_H + +#include +#include +#include +#include + +#include +//#include +//#include + + +namespace cds { namespace intrusive { namespace striped_set { + + /// Lock striping concurrent access policy + /** + This is one of available opt::mutex_policy option type for StripedSet + + Lock striping is very simple technique. + The set consists of the bucket table and the array of locks. + Initially, the capacity of lock array and bucket table is the same. + When set is resized, bucket table capacity will be doubled but lock array will not. + The lock \p i protects each bucket \p j, where j = i mod L , + where \p L - the size of lock array. + + The policy contains an internal array of \p Lock locks. 
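As a minimal sketch of the mapping described above (the helper below is illustrative, not a libcds API): with \p L locks, bucket \p j is guarded by lock j mod L, so the bucket table may grow while the lock array stays fixed.
\code
// Lock striping in isolation: map a bucket index to its guarding lock.
inline size_t lock_index_for_bucket( size_t nBucket, size_t nLockCount )
{
    return nBucket % nLockCount;   // equals (nBucket & (nLockCount - 1)) when nLockCount is a power of two
}
\endcode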
+ + Template arguments: + - \p Lock - the type of mutex. The default is \p cds_std::mutex. The mutex type should be default-constructible. + Note that a spin-lock is not so good suitable for lock striping for performance reason. + - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR. + */ + template + class striping + { + public: + typedef Lock lock_type ; ///< lock type + typedef Alloc allocator_type ; ///< allocator type + + typedef cds::lock::array< lock_type, cds::lock::pow2_select_policy, allocator_type > lock_array_type ; ///< lock array type + + protected: + //@cond + lock_array_type m_Locks; + //@endcond + + public: + //@cond + class scoped_cell_lock { + cds::lock::scoped_lock< lock_array_type > m_guard; + + public: + scoped_cell_lock( striping& policy, size_t nHash ) + : m_guard( policy.m_Locks, nHash ) + {} + }; + + class scoped_full_lock { + cds::lock::scoped_lock< lock_array_type > m_guard; + public: + scoped_full_lock( striping& policy ) + : m_guard( policy.m_Locks ) + {} + }; + + class scoped_resize_lock: public scoped_full_lock { + public: + scoped_resize_lock( striping& policy ) + : scoped_full_lock( policy ) + {} + + bool success() const + { + return true; + } + }; + //@endcond + + public: + /// Constructor + striping( + size_t nLockCount ///< The size of lock array. Must be power of two. + ) + : m_Locks( nLockCount, cds::lock::pow2_select_policy( nLockCount )) + {} + + /// Returns lock array size + /** + Lock array size is unchanged during \p striped object lifetime + */ + size_t lock_count() const + { + return m_Locks.size(); + } + + //@cond + void resize( size_t /*nNewCapacity*/ ) + {} + //@endcond + }; + + + /// Refinable concurrent access policy + /** + This is one of available opt::mutex_policy option type for StripedSet + + Refining is like a striping technique (see striped_set::striping) + but it allows growing the size of lock array when resizing the hash table. + So, the sizes of hash table and lock array are equal. + + Template arguments: + - \p RecursiveLock - the type of mutex. Reentrant (recursive) mutex is required. + The default is \p cds_std::recursive_mutex. The mutex type should be default-constructible. + - \p BackOff - back-off strategy. Default is cds::backoff::yield + - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR. 
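The implementation below packs a "resize in progress" flag and the id of the resizing thread into a single atomic word. A hedged sketch of that encoding (helper names are hypothetical; the real code additionally masks the thread id with \p c_nOwnerMask):
\code
typedef unsigned long long owner_t;

// Low bit: resize in progress; remaining bits: id of the thread performing the resize
inline owner_t make_resizing_owner( owner_t threadId ) { return ( threadId << 1 ) | 1; }
inline bool    is_resizing( owner_t who )              { return ( who & 1 ) != 0; }
inline owner_t resizing_thread( owner_t who )          { return who >> 1; }
\endcode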
+ */ + template < + class RecursiveLock = cds_std::recursive_mutex, + typename BackOff = cds::backoff::yield, + class Alloc = CDS_DEFAULT_ALLOCATOR> + class refinable + { + public: + typedef RecursiveLock lock_type ; ///< lock type + typedef BackOff back_off ; ///< back-off strategy used + typedef Alloc allocator_type; ///< allocator type + + protected: + //@cond + typedef cds::lock::trivial_select_policy lock_selection_policy; + + class lock_array_type + : public cds::lock::array< lock_type, lock_selection_policy, allocator_type > + , public std::enable_shared_from_this< lock_array_type > + { + typedef cds::lock::array< lock_type, lock_selection_policy, allocator_type > lock_array_base; + public: + lock_array_type( size_t nCapacity ) + : lock_array_base( nCapacity ) + {} + }; + typedef std::shared_ptr< lock_array_type > lock_array_ptr; + typedef cds::details::Allocator< lock_array_type, allocator_type > lock_array_allocator; + + typedef unsigned long long owner_t; + typedef cds::OS::ThreadId threadId_t; + + typedef cds::lock::Spin spinlock_type; + typedef cds::lock::scoped_lock< spinlock_type > scoped_spinlock; + //@endcond + + protected: + //@cond + static owner_t const c_nOwnerMask = (((owner_t) 1) << (sizeof(owner_t) * 8 - 1)) - 1; + + lock_array_ptr m_arrLocks ; ///< Lock array. The capacity of array is specified in constructor. + CDS_ATOMIC::atomic< owner_t > m_Owner ; ///< owner mark (thread id + boolean flag) + CDS_ATOMIC::atomic m_nCapacity ; ///< Lock array capacity + spinlock_type m_access ; ///< access to m_arrLocks + //@endcond + + protected: + //@cond + struct lock_array_disposer { + void operator()( lock_array_type * pArr ) + { + lock_array_allocator().Delete( pArr ); + } + }; + + lock_array_ptr create_lock_array( size_t nCapacity ) + { + m_nCapacity.store( nCapacity, CDS_ATOMIC::memory_order_relaxed ); + return lock_array_ptr( lock_array_allocator().New( nCapacity ), lock_array_disposer() ); + } + + lock_type& acquire( size_t nHash ) + { + owner_t me = (owner_t) cds::OS::getCurrentThreadId(); + owner_t who; + + back_off bkoff; + while ( true ) { + // wait while resizing + while ( true ) { + who = m_Owner.load( CDS_ATOMIC::memory_order_acquire ); + if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) ) + break; + bkoff(); + } + + lock_array_ptr pLocks; + { + scoped_spinlock sl(m_access); + pLocks = m_arrLocks; + } + + lock_type& lock = pLocks->at( nHash & (pLocks->size() - 1)); + lock.lock(); + + who = m_Owner.load( CDS_ATOMIC::memory_order_acquire ); + if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks == pLocks ) + return lock; + lock.unlock(); + } + } + + lock_array_ptr acquire_all() + { + owner_t me = (owner_t) cds::OS::getCurrentThreadId(); + owner_t who; + + back_off bkoff; + while ( true ) { + // wait while resizing + while ( true ) { + who = m_Owner.load( CDS_ATOMIC::memory_order_acquire ); + if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) ) + break; + bkoff(); + } + + lock_array_ptr pLocks; + { + scoped_spinlock sl(m_access); + pLocks = m_arrLocks; + } + + pLocks->lock_all(); + + who = m_Owner.load( CDS_ATOMIC::memory_order_acquire ); + if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks == pLocks ) + return pLocks; + + pLocks->unlock_all(); + } + } + + void release_all( lock_array_ptr p ) + { + p->unlock_all(); + } + + bool acquire_resize() + { + owner_t me = (owner_t) cds::OS::getCurrentThreadId(); + + back_off bkoff; + for (unsigned int nAttempts = 0; nAttempts < 32; ++nAttempts ) { + owner_t ownNull = 0; + if ( 
m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) { + lock_array_ptr pOldLocks = m_arrLocks; + size_t const nLockCount = pOldLocks->size(); + for ( size_t i = 0; i < nLockCount; ++i ) { + typename lock_array_type::lock_type& lock = pOldLocks->at(i); + bkoff.reset(); + while ( !lock.try_lock() ) + bkoff(); + lock.unlock(); + } + return true; + } + else + bkoff(); + } + return false; + } + + void release_resize() + { + m_Owner.store( 0, CDS_ATOMIC::memory_order_release ); + } + //@endcond + public: + //@cond + class scoped_cell_lock { + cds::lock::scoped_lock< lock_type > m_guard; + + public: + scoped_cell_lock( refinable& policy, size_t nHash ) + : m_guard( policy.acquire( nHash ), true ) + {} + }; + + class scoped_full_lock { + refinable& m_Policy; + lock_array_ptr m_Locks; + public: + scoped_full_lock( refinable& policy ) + : m_Policy( policy ) + { + m_Locks = policy.acquire_all(); + } + ~scoped_full_lock() + { + m_Policy.release_all( m_Locks ); + } + }; + + class scoped_resize_lock { + refinable& m_Policy; + bool m_bSucceess; + + public: + scoped_resize_lock( refinable& policy ) + : m_Policy( policy ) + { + m_bSucceess = policy.acquire_resize(); + } + + ~scoped_resize_lock() + { + if ( m_bSucceess ) + m_Policy.release_resize(); + } + + bool success() const + { + return m_bSucceess; + } + }; + //@endcond + + public: + /// Constructor + refinable( + size_t nLockCount ///< Initial size of lock array. Must be power of two. + ) + : m_Owner(0) + , m_nCapacity( nLockCount ) + { + assert( cds::beans::is_power2( nLockCount )); + m_arrLocks = create_lock_array( nLockCount ); + } + + /// Returns lock array size + /** + Lock array size is not a constant for \p refinable policy and can be changed when the set is resized. + */ + size_t lock_count() const + { + return m_nCapacity.load( CDS_ATOMIC::memory_order_relaxed ); + } + + /// Resize for new capacity + void resize( size_t nNewCapacity ) + { + // Expect the access is locked by scoped_resize_lock!!! + lock_array_ptr pNewArr = create_lock_array( nNewCapacity ); + scoped_spinlock sl(m_access); + m_arrLocks.swap( pNewArr ); + } + }; + +}}} // namespace cds::intrusive::striped_set + +#endif diff --git a/cds/intrusive/treiber_stack.h b/cds/intrusive/treiber_stack.h new file mode 100644 index 00000000..713790b4 --- /dev/null +++ b/cds/intrusive/treiber_stack.h @@ -0,0 +1,696 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_TREIBER_STACK_H +#define __CDS_INTRUSIVE_TREIBER_STACK_H + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// TreiberStack related definitions + /** @ingroup cds_intrusive_helper + */ + namespace treiber_stack { + + //@cond + /// Operation id for the \ref cds_elimination_description "elimination back-off" + enum operation_id { + op_push, ///< push op id + op_pop ///< pop op id + }; + + /// Operation descriptor for the \ref cds_elimination_description "elimination back-off" + template + struct operation: public cds::algo::elimination::operation_desc + { + operation_id idOp; ///< Op id + T * pVal; ///< for push: pointer to argument; for pop: accepts a return value + CDS_ATOMIC::atomic nStatus; ///< Internal elimination status + + operation() + : pVal( null_ptr() ) + , nStatus(0) + {} + }; + //@endcond + + /// Stack internal statistics. May be useful for debugging or profiling + /** + Template argument \p Counter defines type of counter. + Default is cds::atomicity::event_counter. 
+ You may use stronger type of counter like as cds::atomicity::item_counter, + or even an integral type, for example, \p int + */ + template + struct stat + { + typedef Counter counter_type ; ///< Counter type + + counter_type m_PushCount ; ///< Push call count + counter_type m_PopCount ; ///< Pop call count + counter_type m_PushRace ; ///< Count of push race conditions encountered + counter_type m_PopRace ; ///< Count of pop race conditions encountered + counter_type m_ActivePushCollision ; ///< Count of active push collision for elimination back-off + counter_type m_ActivePopCollision ; ///< Count of active pop collision for elimination back-off + counter_type m_PassivePushCollision ; ///< Count of passive push collision for elimination back-off + counter_type m_PassivePopCollision ; ///< Count of passive pop collision for elimination back-off + counter_type m_EliminationFailed ; ///< Count of unsuccessful elimination back-off + + //@cond + void onPush() { ++m_PushCount; } + void onPop() { ++m_PopCount; } + void onPushRace() { ++m_PushRace; } + void onPopRace() { ++m_PopRace; } + void onActiveCollision( operation_id opId ) + { + if ( opId == treiber_stack::op_push ) + ++m_ActivePushCollision; + else + ++m_ActivePopCollision; + } + void onPassiveCollision( operation_id opId ) + { + if ( opId == treiber_stack::op_push ) + ++m_PassivePushCollision; + else + ++m_PassivePopCollision; + } + void onEliminationFailed() { ++m_EliminationFailed;} + //@endcond + }; + + /// Empty (no overhead) stack statistics. Support interface like treiber_stack::stat + struct empty_stat + { + //@cond + void onPush() {} + void onPop() {} + void onPushRace() {} + void onPopRace() {} + void onActiveCollision( operation_id ) {} + void onPassiveCollision( operation_id ) {} + void onEliminationFailed() {} + //@endcond + }; + + //@cond + namespace details { + + template + class elimination_backoff; + + template + class elimination_backoff + { + typedef typename Traits::back_off back_off; + + back_off m_bkoff; + public: + elimination_backoff() + {} + + elimination_backoff( size_t ) + {} + + void reset() + { + m_bkoff.reset(); + } + + template + bool backoff(treiber_stack::operation< T >&, Stat& ) + { + m_bkoff(); + return false; + } + }; + + template + class elimination_backoff + { + typedef typename Traits::back_off back_off; + + /// Back-off for elimination (usually delay) + typedef typename Traits::elimination_backoff elimination_backoff_type; + /// Lock type used in elimination back-off + typedef typename Traits::lock_type elimination_lock_type; + /// Random engine used in elimination back-off + typedef typename Traits::random_engine elimination_random_engine; + + /// Per-thread elimination record + typedef cds::algo::elimination::record elimination_rec; + + /// Collision array record + struct collision_array_record { + elimination_rec * pRec; + elimination_lock_type lock; + }; + + /// Collision array used in elimination-backoff; each item is optimized for cache-line size + typedef typename Traits::buffer::template rebind< + typename cds::details::type_padding::type + >::other collision_array; + + /// Operation descriptor used in elimination back-off + typedef treiber_stack::operation< T > operation_desc; + +# if !(defined(CDS_CXX11_LAMBDA_SUPPORT) && !(CDS_COMPILER == CDS_COMPILER_MSVC && CDS_COMPILER_VERSION == CDS_COMPILER_MSVC10)) + struct bkoff_predicate { + operation_desc * pOp; + bkoff_predicate( operation_desc * p ): pOp(p) {} + bool operator()() { return pOp->nStatus.load( CDS_ATOMIC::memory_order_acquire ) 
!= op_busy; } + }; +# endif + + /// Elimination back-off data + struct elimination_data { + elimination_random_engine randEngine; ///< random engine + collision_array collisions; ///< collision array + + elimination_data() + { + //TODO: check Traits::buffer must be static! + } + elimination_data( size_t nCollisionCapacity ) + : collisions( nCollisionCapacity ) + {} + }; + + elimination_data m_Elimination; + + enum operation_status { + op_free = 0, + op_busy = 1, + op_collided = 2 + }; + + typedef cds::lock::scoped_lock< elimination_lock_type > slot_scoped_lock; + + public: + elimination_backoff() + { + m_Elimination.collisions.zeroize(); + } + + elimination_backoff( size_t nCollisionCapacity ) + : m_Elimination( nCollisionCapacity ) + { + m_Elimination.collisions.zeroize(); + } + + void reset() + {} + + template + bool backoff( operation_desc& op, Stat& stat ) + { + elimination_backoff_type bkoff; + op.nStatus.store( op_busy, CDS_ATOMIC::memory_order_relaxed ); + + elimination_rec * myRec = cds::algo::elimination::init_record( op ); + + collision_array_record& slot = m_Elimination.collisions[m_Elimination.randEngine() % m_Elimination.collisions.capacity()]; + { + slot.lock.lock(); + elimination_rec * himRec = slot.pRec; + if ( himRec ) { + operation_desc * himOp = static_cast( himRec->pOp ); + assert( himOp ); + if ( himOp->idOp != op.idOp ) { + if ( op.idOp == treiber_stack::op_push ) + himOp->pVal = op.pVal; + else + op.pVal = himOp->pVal; + slot.pRec = null_ptr(); + slot.lock.unlock(); + + himOp->nStatus.store( op_collided, CDS_ATOMIC::memory_order_release ); + cds::algo::elimination::clear_record(); + stat.onActiveCollision( op.idOp ); + return true; + } + himOp->nStatus.store( op_free, CDS_ATOMIC::memory_order_release ); + } + slot.pRec = myRec; + slot.lock.unlock(); + } + + // Wait for colliding operation +# if defined(CDS_CXX11_LAMBDA_SUPPORT) && !(CDS_COMPILER == CDS_COMPILER_MSVC && CDS_COMPILER_VERSION == CDS_COMPILER_MSVC10) + // MSVC++ 2010 compiler error C2065: 'op_busy' : undeclared identifier + bkoff( [&op]() -> bool { return op.nStatus.load( CDS_ATOMIC::memory_order_acquire ) != op_busy; } ); +# else + // Local structs is not supported by old compilers (for example, GCC 4.3) + //struct bkoff_predicate { + // operation_desc * pOp; + // bkoff_predicate( operation_desc * p ): pOp(p) {} + // bool operator()() { return pOp->nStatus.load( CDS_ATOMIC::memory_order_acquire ) != op_busy; } + //}; + bkoff( bkoff_predicate(&op) ); +# endif + + { + slot_scoped_lock l( slot.lock ); + if ( slot.pRec == myRec ) + slot.pRec = null_ptr(); + } + + bool bCollided = op.nStatus.load( CDS_ATOMIC::memory_order_acquire ) == op_collided; + + if ( !bCollided ) + stat.onEliminationFailed(); + else + stat.onPassiveCollision( op.idOp ); + + cds::algo::elimination::clear_record(); + return bCollided; + } + }; + + } // namespace details + //@endcond + } // namespace treiber_stack + + /// Treiber stack + /** @ingroup cds_intrusive_stack + Intrusive implementation of well-known Treiber's stack algorithm: + - R. K. Treiber. Systems programming: Coping with parallelism. Technical Report RJ 5118, IBM Almaden Research Center, April 1986. + + \ref cds_elimination_description "Elimination back-off technique" can be used optionally. + The idea of elimination algorithm is taken from: + - [2004] Danny Hendler, Nir Shavit, Lena Yerushalmi "A Scalable Lock-free Stack Algorithm" + + The elimination algorithm uses a single elimination array as a back-off schema + on a shared lock-free stack. 
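The hand-off performed in the collision slot above can be sketched in isolation as follows (illustrative types only, not the real operation descriptors): a pending push that meets a pending pop passes its value directly, and neither operation touches the stack.
\code
#include <cassert>

struct op_desc { bool is_push; int* pVal; };

// Returns true when the two pending operations eliminated each other
inline bool try_eliminate( op_desc& a, op_desc& b )
{
    if ( a.is_push == b.is_push )
        return false;                 // push/push or pop/pop cannot eliminate
    op_desc& pushOp = a.is_push ? a : b;
    op_desc& popOp  = a.is_push ? b : a;
    popOp.pVal = pushOp.pVal;         // the pop receives the pushed value
    return true;
}

int main()
{
    int x = 42;
    op_desc pushOp = { true, &x };
    op_desc popOp  = { false, 0 };
    assert( try_eliminate( pushOp, popOp ) && popOp.pVal == &x );
    return 0;
}
\endcode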
If the threads fail on the stack, they attempt to eliminate + on the array, and if they fail in eliminating, they attempt to access the stack again and so on. + + @note Hendler's et al paper describes a lock-free implementation of elimination back-off which is quite complex. + The main difficulty is the managing life-time of elimination record. + Our implementation uses simplified lock-based (spin-based) approach which allows + the elimination record allocation on thread's stack. + This approach demonstrates sufficient performance under high load. + + Template arguments: + - \p GC - garbage collector type: gc::HP, gc::HRC, gc::PTB + - \p T - type to be inserted into the stack + - \p Options - options + + \p Options are: + - opt::hook - hook used. Possible values are: single_link::base_hook, single_link::member_hook, single_link::traits_hook. + If the option is not specified, single_link::base_hook<> is used. + For Gidenstam's gc::HRC, only single_link::base_hook is supported. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::Default is used. + - opt::disposer - the functor used for dispose removed items. Default is opt::v::empty_disposer. This option is used only + in \ref clear function. + - opt::link_checker - the type of node's link fields checking. Default is \ref opt::debug_check_link. + Note: for gc::HRC garbage collector, link checking policy is always selected as \ref opt::always_check_link. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter + - opt::stat - the type to gather internal statistics. + Possible option value are: \ref treiber_stack::stat, \ref treiber_stack::empty_stat (the default), + user-provided class that supports treiber_stack::stat interface. + - opt::enable_elimination - enable elimination back-off for the stack. Default value is \p valse. + + If elimination back-off is enabled (\p %cds::opt::enable_elimination< true >) additional options can be specified: + - opt::buffer - a buffer type for elimination array, see \p opt::v::static_buffer, \p opt::v::dynamic_buffer. + The buffer can be any size: \p Exp2 template parameter of those classes can be \p false. + The size should be selected empirically for your application and hardware, there are no common rules for that. + Default is %opt::v::static_buffer< any_type, 4 > . + - opt::random_engine - a random engine to generate a random position in elimination array. + Default is opt::v::c_rand. + - opt::elimination_backoff - back-off strategy to wait for elimination, default is cds::backoff::delay<> + - opt::lock_type - a lock type used in elimination back-off, default is cds::lock::Spin. + + Garbage collecting schema \p GC must be consistent with the single_link::node GC. + + Be careful when you want destroy an item popped, see \ref cds_intrusive_item_destroying "Destroying items of intrusive containers". + + @anchor cds_intrusive_TreiberStack_examples + \par Examples + + Example of how to use \p single_link::base_hook. + Your class that objects will be pushed on \p %TreiberStack should be based on \p single_link::node class + \code + #include + #include + + namespace ci = cds::intrusive; + typedef cds::gc::HP gc; + + struct myData: public ci::single_link::node< gc > + { + // ... 
+ }; + + // Stack type + typedef ci::TreiberStack< gc, + myData, + ci::opt::hook< ci::single_link::base_hook< gc > > + > stack_t; + + // Stack with elimination back-off enabled + typedef ci::TreiberStack< gc, + myData, + ci::opt::hook< ci::single_link::base_hook< gc > >, + cds::opt::enable_elimination + > elimination_stack_t; + \endcode + + Example of how to use \p base_hook with different tags. + \code + #include + #include + + namespace ci = cds::intrusive; + typedef cds::gc::HP gc; + + // It is not necessary to declare complete type for tags + struct tag1; + struct tag2; + + struct myData + : public ci::single_link::node< gc, tag1 > + , public ci::single_link::node< gc, tag2 > + { + // ... + }; + + typedef ci::TreiberStack< gc, myData, ci::opt::hook< ci::single_link::base_hook< gc, tag1 > > stack1_t; + typedef ci::TreiberStack< gc, myData, ci::opt::hook< ci::single_link::base_hook< gc, tag2 > > stack2_t; + + // You may add myData objects in the objects of type stack1_t and stack2_t independently + void foo() { + stack1_t s1; + stack2_t s2; + + myData i1, i2; + s1.push( i1 ); + s2.push( i2 ); + s2.push( i1 ) ; // i1 is now contained in s1 and s2. + + myData * p; + + p = s1.pop() ; // pop i1 from s1 + p = s1.pop() ; // p == NULL, s1 is empty + p = s2.pop() ; // pop i1 from s2 + p = s2.pop() ; // pop i2 from s2 + p = s2.pop() ; // p == NULL, s2 is empty + } + \endcode + + Example of how to use \p member_hook. + Your class that will be pushed on \p %TreiberStack should have a member of type \p single_link::node + \code + #include + #include + #include // offsetof macro + + namespace ci = cds::intrusive; + typedef cds::gc::HP gc; + + struct myData + { + // ... + ci::single_link::node< gc > member_hook_; + // ... + }; + + typedef ci::TreiberStack< gc, myData, + ci::opt::hook< + ci::single_link::member_hook< offsetof(myData, member_hook_), + gc + > + > stack_t; + \endcode + */ + template + class TreiberStack + { + //@cond + struct default_options + { + typedef cds::backoff::Default back_off; + typedef single_link::base_hook<> hook; + typedef opt::v::empty_disposer disposer; + typedef atomicity::empty_item_counter item_counter; + typedef opt::v::relaxed_ordering memory_model; + typedef treiber_stack::empty_stat stat; + static CDS_CONSTEXPR_CONST opt::link_check_type link_checker = opt::debug_check_link; + + // Elimination back-off options + static CDS_CONSTEXPR_CONST bool enable_elimination = false; + typedef cds::backoff::delay<> elimination_backoff; + typedef opt::v::static_buffer< int, 4 > buffer; + typedef opt::v::c_rand random_engine; + typedef cds::lock::Spin lock_type; + }; + //@endcond + + public: + //@cond + typedef typename opt::make_options< + typename cds::opt::find_type_traits< default_options, CDS_OPTIONS13 >::type + ,CDS_OPTIONS13 + >::type options; + //@endcond + + public: + /// Rebind template arguments + template + struct rebind { + typedef TreiberStack< GC2, T2, CDS_OTHER_OPTIONS13> other ; ///< Rebinding result + }; + + public: + typedef T value_type ; ///< type of value stored in the stack + typedef typename options::hook hook ; ///< hook type + typedef typename hook::node_type node_type ; ///< node type + typedef typename options::disposer disposer ; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + typedef typename single_link::get_link_checker< node_type, options::link_checker >::type link_checker ; ///< link checker + typedef typename options::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename options::item_counter item_counter ; ///< Item counting policy used + typedef typename options::stat stat ; ///< Internal statistics policy used + + typedef GC gc ; ///< Garbage collector + typedef typename options::back_off back_off ; ///< back-off strategy + + public: // related to elimination back-off + + /// Elimination back-off is enabled or not + static CDS_CONSTEXPR_CONST bool enable_elimination = options::enable_elimination; + /// back-off strategy used to wait for elimination + typedef typename options::elimination_backoff elimination_backoff_type; + /// Lock type used in elimination back-off + typedef typename options::lock_type elimination_lock_type; + /// Random engine used in elimination back-off + typedef typename options::random_engine elimination_random_engine; + + + protected: + typename node_type::atomic_node_ptr m_Top ; ///< Top of the stack + item_counter m_ItemCounter ; ///< Item counter + stat m_stat ; ///< Internal statistics + + //@cond + treiber_stack::details::elimination_backoff m_Backoff; + + typedef intrusive::node_to_value node_to_value; + typedef treiber_stack::operation< value_type > operation_desc; + //@endcond + + protected: + //@cond + void clear_links( node_type * pNode ) CDS_NOEXCEPT + { + pNode->m_pNext.store( null_ptr(), memory_model::memory_order_relaxed ); + } + + template + struct elimination_backoff_impl; + + void init() + { + // GC and node_type::gc must be the same + static_assert(( std::is_same::value ), "GC and node_type::gc must be the same"); + + // For cds::gc::HRC, only base_hook is allowed + static_assert(( + std::conditional< + std::is_same::value, + std::is_same< typename hook::hook_type, opt::base_hook_tag >, + boost::true_type + >::type::value + ), "For cds::gc::HRC, only base_hook is allowed"); + + static_assert( (!enable_elimination || std::is_same::value), + "Random engine result type must be unsigned int" ); + } + + //@endcond + + public: + /// Constructs empty stack + TreiberStack() + : m_Top(null_ptr()) + { + init(); + } + + /// Constructs empty stack and initializes elimination back-off data + /** + This form should be used if you use elimination back-off with dynamically allocated collision array, i.e + \p Options... contains cds::opt::buffer< cds::opt::v::dynamic_buffer >. + \p nCollisionCapacity parameter specifies the capacity of collision array. + */ + TreiberStack( size_t nCollisionCapacity ) + : m_Top(null_ptr()) + , m_Backoff( nCollisionCapacity ) + { + init(); + } + + /// Destructor calls \ref cds_intrusive_TreiberStack_clear "clear" member function + ~TreiberStack() + { + clear(); + } + + /// Push the item \p val on the stack + /** + No copying is made since it is intrusive stack. + */ + bool push( value_type& val ) + { + node_type * pNew = node_traits::to_node_ptr( val ); + link_checker::is_empty( pNew ); + + m_Backoff.reset(); + + operation_desc op; + if ( enable_elimination ) { + op.idOp = treiber_stack::op_push; + op.pVal = &val; + } + + node_type * t = m_Top.load(memory_model::memory_order_relaxed); + while ( true ) { + pNew->m_pNext.store( t, memory_model::memory_order_relaxed ); + if ( m_Top.compare_exchange_weak( t, pNew, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { // #1 sync-with #2 + ++m_ItemCounter; + m_stat.onPush(); + return true; + } + m_stat.onPushRace(); + + if ( m_Backoff.backoff( op, m_stat )) + return true; + } + } + + /// Pop an item from the stack + /** + If stack is empty, returns \p NULL. 
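For orientation, a stripped-down sketch of the same push/pop CAS loops written with plain \p std::atomic (no hazard pointers, no elimination, no item counting). Note that such a bare version is not safe to combine with node reclamation - that is exactly why the real \p pop() protects the top node with a GC guard.
\code
#include <atomic>

struct node { node* next; int value; };

std::atomic<node*> g_top( nullptr );

void push( node* n )
{
    node* t = g_top.load( std::memory_order_relaxed );
    do {
        n->next = t;      // t is refreshed by compare_exchange_weak on failure
    } while ( !g_top.compare_exchange_weak( t, n,
                std::memory_order_release, std::memory_order_relaxed ));
}

node* pop()
{
    node* t = g_top.load( std::memory_order_acquire );
    while ( t && !g_top.compare_exchange_weak( t, t->next,
                std::memory_order_acquire, std::memory_order_relaxed ))
        ;                 // t is refreshed on failure; the loop exits when t is null or the CAS succeeds
    return t;             // nullptr means the stack was empty
}
\endcode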
+ The disposer is not called for popped item. + See \ref cds_intrusive_item_destroying "Destroying items of intrusive containers". + */ + value_type * pop() + { + m_Backoff.reset(); + typename gc::Guard guard; + + operation_desc op; + if ( enable_elimination ) { + op.idOp = treiber_stack::op_pop; + } + + while ( true ) { + node_type * t = guard.protect( m_Top, node_to_value() ); + if ( t == null_ptr() ) + return null_ptr() ; // stack is empty + + node_type * pNext = t->m_pNext.load(memory_model::memory_order_relaxed); + if ( m_Top.compare_exchange_weak( t, pNext, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) { // #2 + clear_links( t ); + --m_ItemCounter; + m_stat.onPop(); + return node_traits::to_value_ptr( *t ); + } + + m_stat.onPopRace(); + if ( m_Backoff.backoff( op, m_stat )) { + // may return NULL if stack is empty + return op.pVal; + } + } + } + + /// Check if stack is empty + bool empty() const + { + // http://www.manning-sandbox.com/thread.jspa?threadID=46245&tstart=0 + return m_Top.load(memory_model::memory_order_relaxed) == null_ptr(); + } + + /// Clear the stack + /** @anchor cds_intrusive_TreiberStack_clear + For each removed item the disposer is called. + + Caution + It is possible that after clear() the empty() returns \p false + if some other thread pushes an item into the stack during \p clear works + */ + void clear() + { + back_off bkoff; + node_type * pTop; + while ( true ) { + pTop = m_Top.load( memory_model::memory_order_relaxed ); + if ( pTop == null_ptr() ) + return; + if ( m_Top.compare_exchange_weak( pTop, null_ptr(), memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed )) { // sync-with #1 and #2 + m_ItemCounter.reset(); + break; + } + bkoff(); + } + + while( pTop ) { + node_type * p = pTop; + pTop = p->m_pNext.load(memory_model::memory_order_relaxed); + clear_links( p ); + gc::template retire( node_traits::to_value_ptr( *p ) ); + } + } + + /// Returns stack's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the stack + is empty. To check emptyness use \ref empty() method. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns reference to internal statistics + stat const& statistics() const + { + return m_stat; + } + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_TREIBER_STACK_H diff --git a/cds/intrusive/tsigas_cycle_queue.h b/cds/intrusive/tsigas_cycle_queue.h new file mode 100644 index 00000000..73131d34 --- /dev/null +++ b/cds/intrusive/tsigas_cycle_queue.h @@ -0,0 +1,370 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_TSIGAS_CYCLE_QUEUE_H +#define __CDS_INTRUSIVE_TSIGAS_CYCLE_QUEUE_H + +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// Non-blocking cyclic queue discovered by Philippas Tsigas and Yi Zhang + /** @ingroup cds_intrusive_queue + + Source: + \li [2000] Philippas Tsigas, Yi Zhang "A Simple, Fast and Scalable Non-Blocking Concurrent FIFO Queue + for Shared Memory Multiprocessor Systems" + + Template arguments: + - T - data stored in queue. The queue stores pointers to passed data of type \p T. + Restriction: the queue can manage at least two-byte aligned data: the least significant bit (LSB) + of any pointer stored in the queue must be zero since the algorithm may use LSB + as a flag that marks the free cell. 
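A tiny sketch of this alignment restriction (the helper below is hypothetical): the queue reserves the pointer's least significant bit as a free-cell marker, so every enqueued object must be at least 2-byte aligned.
\code
#include <cstdint>
#include <cassert>

struct Item { int payload; };   // alignof(Item) >= 2, so the LSB of an Item* is zero

inline bool lsb_free_for_tagging( const void* p )
{
    return ( reinterpret_cast<std::uintptr_t>( p ) & 1 ) == 0;
}

int main()
{
    Item i;
    assert( lsb_free_for_tagging( &i ));   // such an object may be stored in the queue
    return 0;
}
\endcode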
+ - Options - options + + \p Options are: + - opt::buffer - buffer to store items. Mandatory option, see option description for full list of possible types. + - opt::disposer - the functor used for dispose removed items. Default is opt::v::empty_disposer. This option is used + only in \ref clear function. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. + - opt::alignment - the alignment for internal queue data. Default is opt::cache_line_alignment + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + + This queue algorithm does not require any garbage collector. + + \par Examples: + \code + #include + + struct Foo { + ... + }; + + // Queue of Foo pointers, capacity is 1024, statically allocated buffer: + typedef cds::intrusive::TsigasCycleQueue< + Foo + ,cds::opt::buffer< cds::opt::v::static_buffer< Foo, 1024 > > + > static_queue; + static_queue stQueue; + + // Queue of Foo pointers, capacity is 1024, dynamically allocated buffer: + typedef cds::intrusive::TsigasCycleQueue< + Foo + ,cds::opt::buffer< cds::opt::v::dynamic_buffer< Foo > > + > dynamic_queue; + dynamic_queue dynQueue( 1024 ); + \endcode + */ + template + class TsigasCycleQueue: public cds::bounded_container + { + //@cond + struct default_options + { + typedef cds::backoff::empty back_off; + typedef opt::v::empty_disposer disposer; + typedef atomicity::empty_item_counter item_counter; + typedef opt::v::relaxed_ordering memory_model; + enum { alignment = opt::cache_line_alignment }; + }; + //@endcond + + public: + //@cond + typedef typename opt::make_options< + typename cds::opt::find_type_traits< default_options, CDS_OPTIONS7>::type + ,CDS_OPTIONS7 + >::type options; + //@endcond + + public: + /// Rebind template arguments + template + struct rebind { + typedef TsigasCycleQueue< T2, CDS_OTHER_OPTIONS7> other ; ///< Rebinding result + }; + + public: + typedef T value_type ; ///< type of value stored in the queue + typedef typename options::item_counter item_counter; ///< Item counter type + typedef typename options::disposer disposer ; ///< Item disposer + typedef typename options::back_off back_off ; ///< back-off strategy used + typedef typename options::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + + protected: + //@cond + typedef typename options::buffer::template rebind< CDS_ATOMIC::atomic >::other buffer; + typedef typename opt::details::alignment_setter< buffer, options::alignment >::type aligned_buffer; + typedef size_t index_type; + typedef typename opt::details::alignment_setter< CDS_ATOMIC::atomic, options::alignment >::type aligned_index; + //@endcond + + protected: + //@cond + buffer m_buffer ; ///< array of pointer T *, array size is equal to m_nCapacity+1 + aligned_index m_nHead ; ///< index of queue's head + aligned_index m_nTail ; ///< index of queue's tail + item_counter m_ItemCounter ; ///< item counter + //@endcond + + protected: + //@cond + static CDS_CONSTEXPR value_type * free0() CDS_NOEXCEPT + { + return null_ptr(); + } + static CDS_CONSTEXPR value_type * free1() CDS_NOEXCEPT + { + return (value_type*) 1; + } + static bool is_free( const value_type * p ) CDS_NOEXCEPT + { + return p == free0() || p == free1(); + } + + size_t buffer_capacity() const CDS_NOEXCEPT + { + return m_buffer.capacity(); + } + + index_type modulo() const CDS_NOEXCEPT + { + return buffer_capacity() - 1; + } + //@endcond + + public: + /// Initialize empty queue of capacity \p nCapacity + /** + For cds::opt::v::static_buffer the \p nCapacity parameter is ignored. + + Note that the real capacity of queue is \p nCapacity - 2. + */ + TsigasCycleQueue( size_t nCapacity = 0 ) + : m_buffer( nCapacity ) + , m_nHead(0) + , m_nTail(1) + { + m_buffer.zeroize(); + } + + /// Clears the queue + ~TsigasCycleQueue() + { + clear(); + } + + /// Returns queue's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + */ + size_t size() const CDS_NOEXCEPT + { + return m_ItemCounter.value(); + } + + /// Returns capacity of cyclic buffer + size_t capacity() const CDS_NOEXCEPT + { + return buffer_capacity() - 2; + } + + /// Enqueues item from the queue + /** @anchor cds_intrusive_TsigasQueue_enqueue + Returns \p true if success, \p false otherwise (for example, if queue is full) + */ + bool enqueue( value_type& data ) + { + value_type * pNewNode = &data; + assert( (reinterpret_cast( pNewNode ) & 1) == 0 ); + back_off bkoff; + + const index_type nModulo = modulo(); + + do { + index_type te = m_nTail.load(memory_model::memory_order_acquire); + index_type ate = te; + value_type * tt = m_buffer[ ate ].load(memory_model::memory_order_relaxed); + index_type temp = ( ate + 1 ) & nModulo ; // next item after tail + + // Looking for actual tail + while ( !is_free( tt ) ) { + if ( te != m_nTail.load(memory_model::memory_order_relaxed) ) // check the tail consistency + goto TryAgain; + if ( temp == m_nHead.load(memory_model::memory_order_acquire) ) // queue full? 
+ break; + tt = m_buffer[ temp ].load(memory_model::memory_order_relaxed); + ate = temp; + temp = (temp + 1) & nModulo; + } + + if ( te != m_nTail.load(memory_model::memory_order_relaxed) ) + continue; + + // Check whether queue is full + if ( temp == m_nHead.load(memory_model::memory_order_acquire) ) { + ate = ( temp + 1 ) & nModulo; + tt = m_buffer[ ate ].load(memory_model::memory_order_relaxed); + if ( !is_free( tt ) ) { + return false ; // Queue is full + } + + // help the dequeue to update head + m_nHead.compare_exchange_strong( temp, ate, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + continue; + } + + if ( tt == free1() ) + pNewNode = reinterpret_cast(reinterpret_cast( pNewNode ) | 1); + if ( te != m_nTail.load(memory_model::memory_order_relaxed) ) + continue; + + // get actual tail and try to enqueue new node + if ( m_buffer[ate].compare_exchange_strong( tt, pNewNode, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) { + if ( temp % 2 == 0 ) + m_nTail.compare_exchange_strong( te, temp, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + ++m_ItemCounter; + return true; + } + TryAgain:; + } while ( bkoff(), true ); + + // No control path reaches this line! + return false; + } + + /// Dequeues item from the queue + /** @anchor cds_intrusive_TsigasQueue_dequeue + If the queue is empty the function returns \a NULL + + Dequeue does not call value disposer. You can manually dispose returned value if it is needed. + */ + value_type * dequeue() + { + back_off bkoff; + + const index_type nModulo = modulo(); + do { + index_type th = m_nHead.load(memory_model::memory_order_acquire); + index_type temp = ( th + 1 ) & nModulo; + value_type * tt = m_buffer[ temp ].load(memory_model::memory_order_relaxed); + value_type * pNull; + + // find the actual head after this loop + while ( is_free( tt ) ) { + if ( th != m_nHead.load(memory_model::memory_order_relaxed) ) + goto TryAgain; + + // two consecutive NULL means queue empty + if ( temp == m_nTail.load(memory_model::memory_order_acquire) ) + return NULL; + + temp = ( temp + 1 ) & nModulo; + tt = m_buffer[ temp ].load(memory_model::memory_order_relaxed); + } + + if ( th != m_nHead.load(memory_model::memory_order_relaxed) ) + continue; + + // check whether the queue is empty + if ( temp == m_nTail.load(memory_model::memory_order_acquire) ) { + // help the enqueue to update end + m_nTail.compare_exchange_strong( temp, (temp + 1) & nModulo, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + continue; + } + + pNull = (reinterpret_cast( tt ) & 1) ? free0() : free1(); + + if ( th != m_nHead.load(memory_model::memory_order_relaxed) ) + continue; + + // Get the actual head, null means empty + if ( m_buffer[temp].compare_exchange_strong( tt, pNull, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { + if ( temp % 2 == 0 ) + m_nHead.compare_exchange_strong( th, temp, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + --m_ItemCounter; + return reinterpret_cast(reinterpret_cast( tt ) & ~intptr_t(1)); + } + + TryAgain:; + } while ( bkoff(), true ); + + // No control path reaches this line! 
+ return null_ptr(); + } + + /// Synonym of \ref cds_intrusive_TsigasQueue_enqueue "enqueue" + bool push( value_type& data ) + { + return enqueue( data ); + } + + /// Synonym of \ref cds_intrusive_TsigasQueue_dequeue "dequeue" + value_type * pop() + { + return dequeue(); + } + + /// Checks if the queue is empty + bool empty() const + { + const index_type nModulo = modulo(); + + TryAgain: + index_type th = m_nHead.load(memory_model::memory_order_relaxed); + index_type temp = ( th + 1 ) & nModulo; + const value_type * tt = m_buffer[ temp ].load(memory_model::memory_order_relaxed); + + // find the actual head after this loop + while ( is_free( tt ) ) { + if ( th != m_nHead.load(memory_model::memory_order_relaxed) ) + goto TryAgain; + // two consecutive NULL means queue empty + if ( temp == m_nTail.load(memory_model::memory_order_relaxed) ) + return true; + temp = ( temp + 1 ) & nModulo; + tt = m_buffer[ temp ].load(memory_model::memory_order_relaxed); + } + return false; + } + + /// Clears queue in lock-free manner. + /** + \p f parameter is a functor to dispose removed items. + The interface of \p DISPOSER is: + \code + struct myDisposer { + void operator ()( T * val ); + }; + \endcode + You can pass \p disposer by reference using \p boost::ref. + The disposer will be called immediately for each item. + */ + template + void clear( Disposer f ) + { + value_type * pv; + while ( (pv = pop()) != null_ptr() ) { + unref(f)( pv ); + } + } + + /// Clears the queue + /** + This function uses the disposer that is specified in \p Options. + */ + void clear() + { + clear( disposer() ); + } + }; + +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_TSIGAS_CYCLE_QUEUE_H diff --git a/cds/intrusive/vyukov_mpmc_cycle_queue.h b/cds/intrusive/vyukov_mpmc_cycle_queue.h new file mode 100644 index 00000000..be6c48d5 --- /dev/null +++ b/cds/intrusive/vyukov_mpmc_cycle_queue.h @@ -0,0 +1,176 @@ +//$$CDS-header$$ + +#ifndef __CDS_INTRUSIVE_VYUKOV_MPMC_CYCLE_QUEUE_H +#define __CDS_INTRUSIVE_VYUKOV_MPMC_CYCLE_QUEUE_H + +#include +#include + +namespace cds { namespace intrusive { + + /// Vyukov's MPMC bounded queue + /** @ingroup cds_intrusive_queue + This algorithm is developed by Dmitry Vyukov (see http://www.1024cores.net) + + Implementation of intrusive version is based on non-intrusive class container::VyukovMPMCCycleQueue. + + Template parameters: + - \p T - type stored in queue. + - \p Options - queue's options + + Options \p Options are: + - opt::buffer - buffer to store items. Mandatory option, see option description for full list of possible types. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter + - opt::disposer - the functor used for dispose removed items. Default is opt::v::empty_disposer. This option is used + only in \ref clear function. + - opt::alignment - the alignment for internal queue data. Default is opt::cache_line_alignment + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + + + Instead of saving copy of enqueued data, the intrusive implementation stores pointer to passed data. + + \par Examples: + \code + #include + + struct Foo { + ... 
+ }; + + // Queue of Foo pointers, capacity is 1024, statically allocated buffer: + typedef cds::intrusive::VyukovMPMCCycleQueue< + Foo + ,cds::opt::buffer< cds::opt::v::static_buffer< Foo, 1024 > > + > static_queue; + static_queue stQueue; + + // Queue of Foo pointers, capacity is 1024, dynamically allocated buffer: + typedef cds::intrusive::VyukovMPMCCycleQueue< + Foo + ,cds::opt::buffer< cds::opt::v::dynamic_buffer< Foo > > + > dynamic_queue; + dynamic_queue dynQueue( 1024 ); + + \endcode + */ + template + class VyukovMPMCCycleQueue + : private container::VyukovMPMCCycleQueue< T *, CDS_OPTIONS6 > + { + //@cond + typedef container::VyukovMPMCCycleQueue< T *, CDS_OPTIONS6 > base_class; + //@endcond + public: + typedef T value_type ; ///< type of data stored in the queue + typedef typename base_class::item_counter item_counter ; ///< Item counter type + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + typedef typename base_class::options::disposer disposer ; ///< Item disposer + + //@cond + typedef typename base_class::options options; + //@endcond + + public: + /// Rebind template arguments + template + struct rebind { + typedef VyukovMPMCCycleQueue< T2, CDS_OTHER_OPTIONS6> other ; ///< Rebinding result + }; + + public: + /// Constructs the queue of capacity \p nCapacity + /** + For cds::opt::v::static_buffer the \p nCapacity parameter is ignored. + */ + VyukovMPMCCycleQueue( size_t nCapacity = 0 ) + : base_class( nCapacity ) + {} + + /// Enqueues \p data to queue + /** + Note that the intrusive queue stores pointer to \p data passed, not the copy of data. + */ + bool enqueue( value_type& data ) + { + return base_class::enqueue( &data ); + } + + /// Dequeues an item from queue + /** + If queue is empty, returns \p NULL. + */ + value_type * dequeue() + { + value_type * p = null_ptr(); + return base_class::dequeue( p ) ? p : null_ptr(); + } + + /// Synonym of \ref enqueue + bool push( value_type& data ) + { + return enqueue( data ); + } + + /// Synonym of \ref dequeue + value_type * pop() + { + return dequeue(); + } + + /// Clears queue in lock-free manner. + /** + \p f parameter is a functor to dispose removed items. + The interface of \p DISPOSER is: + \code + struct myDisposer { + void operator ()( T * val ); + }; + \endcode + You can pass \p disposer by reference using \p boost::ref. + The disposer will be called immediately for each item. + */ + template + void clear( Disposer f ) + { + value_type * pv; + while ( (pv = pop()) != null_ptr() ) { + unref(f)( pv ); + } + } + + /// Clears the queue + /** + This function uses the disposer that is specified in \p Options. + */ + void clear() + { + clear( disposer() ); + } + + /// Checks if the queue is empty + bool empty() const + { + return base_class::empty(); + } + + + /// Returns queue's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. 
+ */ + size_t size() const + { + return base_class::size(); + } + + /// Returns capacity of cyclic buffer + size_t capacity() const + { + return base_class::capacity(); + } + }; +}} // namespace cds::intrusive + +#endif // #ifndef __CDS_INTRUSIVE_VYUKOV_MPMC_CYCLE_QUEUE_H diff --git a/cds/lock/array.h b/cds/lock/array.h new file mode 100644 index 00000000..ce96cc0b --- /dev/null +++ b/cds/lock/array.h @@ -0,0 +1,324 @@ +//$$CDS-header$$ + +#ifndef __CDS_LOCK_ARRAY_H +#define __CDS_LOCK_ARRAY_H + +#include +#include +#include + +#include + +namespace cds { namespace lock { + + /// Trivial lock \ref array selection policy + struct trivial_select_policy + { + /// Returns \p nWhat + size_t operator()( size_t nWhat, size_t nCapacity ) const + { + assert( nWhat < nCapacity ); + return nWhat; + } + + /// Checks if \p nCapacity is acceptable by policy. For trivial policy, any \p nCapacity is accepted. + static bool is_capacity_accepted( size_t nCapacity ) + { + return true; + } + }; + + /// The lock \ref array cell selection policy "division by modulo" + struct mod_select_policy + { + /// Returns nWhat % nCapacity + size_t operator()( size_t nWhat, size_t nCapacity ) const + { + return nWhat % nCapacity; + } + + /// Checks if \p nCapacity is acceptable by policy. For modulo policy, any positive \p nCapacity is accepted. + static bool is_capacity_accepted( size_t nCapacity ) + { + return nCapacity > 0; + } + }; + + /// The lock \ref array cell selection policy "division by modulo of power of 2" + /** + This policy may be used if the size of lock array is equal to power of two. + */ + struct pow2_select_policy + { + //@cond + const size_t m_nMask; + //@endcond + + /// Ctor. \p nCapacity must be power of two. + pow2_select_policy( size_t nCapacity ) + : m_nMask( nCapacity - 1 ) + { + assert( is_capacity_accepted( nCapacity )); + } + + /// Copy constructor + pow2_select_policy( pow2_select_policy const& src ) + : m_nMask( src.m_nMask ) + {} + +# ifdef CDS_RVALUE_SUPPORT + /// Move constructor + pow2_select_policy( pow2_select_policy&& src ) + : m_nMask( src.m_nMask ) + {} +# endif + + /// Returns nWhat & (nPow2 - 1) + size_t operator()( size_t nWhat, size_t ) const + { + return nWhat & m_nMask; + } + + /// Checks if \p nCapacity is acceptable by policy. \p nCapacity must be power of two + static bool is_capacity_accepted( size_t nCapacity ) + { + return cds::beans::is_power2( nCapacity ); + } + }; + + /// Array of locks + /** + The lock array is useful for building fine-grained lock-based data structure + based on striping technique. Instead of locking access to data struct (a hash map, for example) + at whole, the striping locks only part of the map (a bucket). So, access to different buckets + can be simultaneous. + + Template arguments: + - \p Lock - lock type, for example, \p boost::mutex, \p cds::lock::Spinlock + - \p SelectPolicy - array cell selection policy, the default is \ref mod_select_policy + Available policies: \ref trivial_select_policy, \ref pow2_select_policy, \ref mod_select_policy. + - \p Alloc - memory allocator for array + + To determine array's cell the selection policy \p SelectPolicy functor is used. Two arguments + are passed to the policy: + \code size_t operator()( size_t nHint, size_t nCapacity ) const \endcode + - \p nHint - a hint to calculate cell index in the lock array. Usually, it is a hash value. + - \p nCapacity - the size of the lock array + The functor should return the index in the lock array. + + Note that the type of \p nHint parameter can be any. 
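A sketch of a user-supplied cell selection policy satisfying the interface just described (this policy is illustrative and not shipped with libcds):
\code
struct xor_fold_select_policy
{
    /// Maps a hint (usually a hash value) to a cell index less than \p nCapacity
    size_t operator ()( size_t nHint, size_t nCapacity ) const
    {
        return ( nHint ^ ( nHint >> 16 )) % nCapacity;
    }

    /// Any non-zero capacity is acceptable for this policy
    static bool is_capacity_accepted( size_t nCapacity )
    {
        return nCapacity > 0;
    }
};
\endcode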
+ */ + template + class array + { + //@cond + typedef ::cds::details::Allocator< Lock, Alloc > cxx_allocator; + //@endcond + public: + typedef Lock lock_type ; ///< lock type + typedef SelectPolicy select_cell_policy ; ///< Cell selection policy functor + static size_t const c_nUnspecifiedCell = (size_t) -1 ; ///< failed \ref try_lock call result + + protected: + lock_type * m_arrLocks ; ///< lock array + size_t const m_nCapacity ; ///< array capacity + + select_cell_policy m_SelectCellPolicy ; ///< Cell selection policy + + protected: + //@cond + static lock_type * create_lock_array( size_t nCapacity ) + { + return cxx_allocator().NewArray( nCapacity ); + } + static void delete_lock_array( lock_type * pArr, size_t nCapacity ) + { + if ( pArr ) + cxx_allocator().Delete( pArr, nCapacity ); + } + + // Only for internal use!!! + array() + : m_arrLocks( null_ptr() ) + , m_nCapacity(0) + {} + array( select_cell_policy const& policy ) + : m_arrLocks( null_ptr() ) + , m_nCapacity(0) + , m_SelectCellPolicy( policy ) + {} + //@endcond + + public: + /// Constructs array of locks + /** + Allocates the array and initializes all locks as unlocked. + */ + array( + size_t nCapacity ///< [in] Array size + ) + : m_arrLocks( null_ptr() ) + , m_nCapacity( nCapacity ) + { + m_arrLocks = create_lock_array( nCapacity ); + } + + /// Constructs array of lock and copy cell selection policy + /** + Allocates the array and initializes all locks as unlocked. + */ + array( + size_t nCapacity, ///< [in] Array size + select_cell_policy const& policy ///< Cell selection policy (copy-constructible) + ) + : m_arrLocks( null_ptr() ) + , m_nCapacity( nCapacity ) + , m_SelectCellPolicy( policy ) + { + m_arrLocks = create_lock_array( m_nCapacity ); + } + +# ifdef CDS_RVALUE_SUPPORT + /// Constructs array of lock and move cell selection policy + /** + Allocates the array and initializes all locks as unlocked. + */ + array( + size_t nCapacity, ///< [in] Array size + select_cell_policy&& policy ///< Cell selection policy (move-constructible) + ) + : m_arrLocks( null_ptr() ) + , m_nCapacity( nCapacity ) + , m_SelectCellPolicy( std::forward( policy )) + { + m_arrLocks = create_lock_array( m_nCapacity ); + } +# endif + + /// Destructs array of locks and frees used memory + ~array() + { + delete_lock_array( m_arrLocks, m_nCapacity ); + } + + /// Locks a lock at cell \p hint + /** + To define real array's cell which should be locked, \ref select_cell_policy is used. + The target cell is a result of select_cell_policy( hint, size() ). + + Returns the index of locked lock. + */ + template + size_t lock( Q const& hint ) + { + size_t nCell = m_SelectCellPolicy( hint, size() ); + assert( nCell < size() ); + lock_type& l = m_arrLocks[ nCell ]; + l.lock(); + return nCell; + } + + /// Try lock a lock at cell \p hint + /** + To define real array's cell which should be locked, \ref select_cell_policy is used. + The target cell is a result of select_cell_policy( hint, size() ). + + Returns the index of locked lock if success, \ref c_nUnspecifiedCell constant otherwise. 
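A usage sketch combining \p lock() and \p unlock() (equally applicable to \p try_lock()), assuming the interface shown in this header; the hash value is an arbitrary hint and the function name is hypothetical:
\code
#include <cds/lock/array.h>
#include <cds/lock/spinlock.h>

void locked_update( cds::lock::array< cds::lock::Spin, cds::lock::mod_select_policy >& locks, size_t nHash )
{
    size_t nCell = locks.lock( nHash );   // the selection policy maps the hint to a cell
    // ... update the data guarded by this stripe ...
    locks.unlock( nCell );                // release exactly the cell that lock() returned
}
\endcode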
+ */ + template + size_t try_lock( Q const& hint ) + { + size_t nCell = m_SelectCellPolicy( hint, size() ); + assert( nCell < size() ); + lock_type& l = m_arrLocks[ nCell ]; + if ( l.try_lock() ) + return nCell; + return c_nUnspecifiedCell; + } + + /// Unlock the lock specified by index \p nCell + void unlock( size_t nCell ) + { + assert( nCell < size() ); + m_arrLocks[nCell].unlock(); + } + + /// Lock all + void lock_all() + { + lock_type * pLock = m_arrLocks; + for ( size_t i = 0; i < size(); ++i, ++pLock ) + pLock->lock(); + } + + /// Unlock all + void unlock_all() + { + lock_type * pLock = m_arrLocks; + for ( size_t i = 0; i < size(); ++i, ++pLock ) + pLock->unlock(); + } + + /// Get lock at cell \p nCell. + /** + Precondition: nCell < size() + */ + lock_type& at( size_t nCell ) const + { + assert( nCell < size() ); + return m_arrLocks[ nCell ]; + } + + /// Size of lock array. + size_t size() const + { + return m_nCapacity; + } + }; + + /// Specialization \ref scoped_lock for lock::array + template + class scoped_lock< cds::lock::array< Lock, SelectPolicy, Alloc > >: public cds::details::noncopyable + { + public: + typedef cds::lock::array< Lock, SelectPolicy, Alloc > lock_array_type ; ///< Lock array type + + private: + //@cond + lock_array_type& m_arrLocks; + size_t m_nLockGuarded; + + static const size_t c_nLockAll = ~size_t(0); + //@endcond + + public: + /// Onws the lock array \p arrLocks and locks a cell determined by \p hint parameter + template + scoped_lock( lock_array_type& arrLocks, Q const& hint ) + : m_arrLocks( arrLocks ) + , m_nLockGuarded( arrLocks.lock( hint )) + {} + + /// Locks all from \p arrLocks array + scoped_lock( lock_array_type& arrLocks ) + : m_arrLocks( arrLocks ) + , m_nLockGuarded( c_nLockAll ) + { + arrLocks.lock_all(); + } + + ~scoped_lock() + { + if ( m_nLockGuarded == c_nLockAll ) + m_arrLocks.unlock_all(); + else + m_arrLocks.unlock( m_nLockGuarded ); + } + }; + +}} // namespace cds::lock + +#endif // #ifndef __CDS_LOCK_ARRAY_H diff --git a/cds/lock/scoped_lock.h b/cds/lock/scoped_lock.h new file mode 100644 index 00000000..85423678 --- /dev/null +++ b/cds/lock/scoped_lock.h @@ -0,0 +1,72 @@ +//$$CDS-header$$ + +#ifndef __CDS_LOCK_SCOPED_LOCK_H +#define __CDS_LOCK_SCOPED_LOCK_H + +#include +#include + +namespace cds { namespace lock { + + /// Scoped lock + /** + + An object of type \p scoped_lock controls the ownership of a lockable object within a scope. + A \p scoped_lock object maintains ownership of a lockable object throughout the \p scoped_lock object’s lifetime. + The behavior of a program is undefined if the lockable object does not exist for the entire lifetime + of the \p scoped_lock object. + The supplied \p Lock type shall have two methods: \p lock and \p unlock. + + The constructor locks the wrapped lock object, the destructor unlocks it. + + Scoped lock is not copy-constructible and not default-constructible. + + This class is similar to \p std::lock_quard + */ + template + class scoped_lock: public cds::details::noncopyable + { + public: + typedef Lock lock_type ; ///< Lock type + + protected: + lock_type& m_Lock ; ///< Owned lock object + + protected: + //@cond + // Only for internal use!!! + scoped_lock() + {} + //@endcond + public: + /// Get ownership of lock object \p l and calls l.lock() + scoped_lock( lock_type& l ) + : m_Lock( l ) + { + l.lock(); + } + + /// Get ownership of lock object \p l and conditionally locks it + /** + The constructor calls l.lock() only if \p bAlreadyLocked is \p false. 
+ If \p bAlreadyLocked is \p true, no locking is performed. + + In any case, the destructor of \p scoped_lock object invokes l.unlock(). + */ + scoped_lock( lock_type& l, bool bAlreadyLocked ) + : m_Lock( l ) + { + if ( !bAlreadyLocked ) + l.lock(); + } + + /// Unlock underlying lock object and release ownership + ~scoped_lock() + { + m_Lock.unlock(); + } + }; +}} // namespace cds::lock + + +#endif // #ifndef __CDS_LOCK_SCOPED_LOCK_H diff --git a/cds/lock/spinlock.h b/cds/lock/spinlock.h new file mode 100644 index 00000000..19be1a4f --- /dev/null +++ b/cds/lock/spinlock.h @@ -0,0 +1,422 @@ +//$$CDS-header$$ + +#ifndef __CDS_LOCK_SPINLOCK_H +#define __CDS_LOCK_SPINLOCK_H + +/* + Defines spin-lock primitives + Editions: + 2012.01.23 1.1.0 khizmax Refactoring: use C++11 atomics + 2010.01.22 0.6.0 khizmax Refactoring: use cds::atomic namespace + Explicit memory ordering specification (atomic::memory_order_xxx) + 2006 khizmax Created +*/ + +#include +#include +#include +#include + +#include + +namespace cds { + /// Synchronization primitives + namespace lock { + /// Spin lock. + /** + Simple and light-weight spin-lock critical section + It is useful to gain access to small (short-timed) code + + Algorithm: + + TATAS (test-and-test-and-lock) + [1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors. + + No serialization performed - any of waiting threads may owns the spin-lock. + This spin-lock is NOT recursive: the thread owned the lock cannot call lock() method withod deadlock. + The method unlock() can call any thread + + DEBUG version: The spinlock stores owner thead id. Assertion is raised when: + - double lock attempt encountered by same thread (deadlock) + - unlock by another thread + + If spin-lock is locked the Backoff algorithm is called. Predefined backoff::LockDefault class yields current + thread and repeats lock attempts later + + Template parameters: + - @p Backoff backoff strategy. Used when spin lock is locked + */ + template + class Spinlock + { + public: + typedef Backoff backoff_strategy ; ///< back-off strategy type + private: + CDS_ATOMIC::atomic m_spin ; ///< Spin +# ifdef CDS_DEBUG + typename OS::ThreadId m_dbgOwnerId ; ///< Owner thread id (only for debug mode) +# endif + + public: + /// Construct free (unlocked) spin-lock + Spinlock() CDS_NOEXCEPT +# ifdef CDS_DEBUG + :m_dbgOwnerId( OS::nullThreadId() ) +# endif + { + m_spin.store( false, CDS_ATOMIC::memory_order_relaxed ); + } + + /// Construct spin-lock in specified state + /** + In debug mode: if \p bLocked = true then spin-lock is made owned by current thread + */ + Spinlock( bool bLocked ) CDS_NOEXCEPT +# ifdef CDS_DEBUG + :m_dbgOwnerId( bLocked ? OS::getCurrentThreadId() : OS::nullThreadId() ) +# endif + { + m_spin.store( bLocked, CDS_ATOMIC::memory_order_relaxed ); + } + + /// Dummy copy constructor + /** + In theory, spin-lock cannot be copied. However, it is not practical. + Therefore, we provide dummy copy constructor that do no copy in fact. The ctor + initializes the spin to free (unlocked) state like default ctor. + */ + Spinlock(const Spinlock& ) CDS_NOEXCEPT + : m_spin( false ) +# ifdef CDS_DEBUG + , m_dbgOwnerId( OS::nullThreadId() ) +# endif + {} + + /// Destructor. 
On debug time it checks whether spin-lock is free + ~Spinlock() + { + assert( !m_spin.load( CDS_ATOMIC::memory_order_relaxed ) ); + } + + /// Check if the spin is locked + bool is_locked() const CDS_NOEXCEPT + { + return m_spin.load( CDS_ATOMIC::memory_order_relaxed ); + } + + /// Try to lock the object + /** + Returns \p true if locking is succeeded + otherwise (if the spin is already locked) returns \p false + + Debug version: deadlock can be detected + */ + bool try_lock() CDS_NOEXCEPT + { + return tryLock(); + } + + /// Try to lock the object (synonym for \ref try_lock) + bool tryLock() CDS_NOEXCEPT + { + bool bCurrent = false; + m_spin.compare_exchange_strong( bCurrent, true, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ); + + CDS_DEBUG_DO( + if ( !bCurrent ) { + m_dbgOwnerId = OS::getCurrentThreadId(); + } + ) + return !bCurrent; + } + + /// Try to lock the object, repeat @p nTryCount times if failed + /** + Returns \p true if locking is succeeded + otherwise (if the spin is already locked) returns \p false + */ + bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT + { + return tryLock( nTryCount ); + } + + /// Try to lock the object (synonym for \ref try_lock) + bool tryLock( unsigned int nTryCount ) CDS_NOEXCEPT + { + Backoff backoff; + while ( nTryCount-- ) { + if ( tryLock() ) + return true; + backoff(); + } + return false; + } + + /// Lock the spin-lock. Waits infinitely while spin-lock is locked. Debug version: deadlock may be detected + void lock() CDS_NOEXCEPT + { + Backoff backoff; + + // Deadlock detected + assert( m_dbgOwnerId != OS::getCurrentThreadId() ); + + // TATAS algorithm + while ( !tryLock() ) { + while ( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) ) { + backoff(); + } + } + assert( m_dbgOwnerId == OS::getCurrentThreadId() ); + } + + /// Unlock the spin-lock. Debug version: deadlock may be detected + void unlock() CDS_NOEXCEPT + { + assert( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) ); + + assert( m_dbgOwnerId == OS::getCurrentThreadId() ); + CDS_DEBUG_DO( m_dbgOwnerId = OS::nullThreadId() ;) + + m_spin.store( false, CDS_ATOMIC::memory_order_release ); + } + }; + + /// Spin-lock implementation default for the current platform + typedef Spinlock Spin; + + /// Recursive spin lock. + /** + Allows recursive calls: the owner thread may recursive enter to critical section guarded by the spin-lock. + + Template parameters: + - @p Integral one of integral atomic type: unsigned int, int, and others + - @p Backoff backoff strategy. Used when spin lock is locked + */ + template + class ReentrantSpinT + { + typedef OS::ThreadId thread_id ; ///< The type of thread id + + public: + typedef Integral integral_type ; ///< The integral type + typedef Backoff backoff_strategy ; ///< The backoff type + + private: + CDS_ATOMIC::atomic m_spin ; ///< spin-lock atomic + thread_id m_OwnerId ; ///< Owner thread id. 
If spin-lock is not locked it usually equals to OS::nullThreadId() + + private: + //@cond + void beOwner( thread_id tid ) CDS_NOEXCEPT + { + m_OwnerId = tid; + } + + void free() CDS_NOEXCEPT + { + m_OwnerId = OS::nullThreadId(); + } + + bool isOwned( thread_id tid ) const CDS_NOEXCEPT + { + return m_OwnerId == tid; + } + + bool tryLockOwned( thread_id tid ) CDS_NOEXCEPT + { + if ( isOwned( tid )) { + m_spin.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + return true; + } + return false; + } + + bool tryAcquireLock() CDS_NOEXCEPT + { + integral_type nCurrent = 0; + return m_spin.compare_exchange_weak( nCurrent, 1, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ); + } + + bool tryAcquireLock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() )) + { + backoff_strategy bkoff; + + while ( nTryCount-- ) { + if ( tryAcquireLock() ) + return true; + bkoff(); + } + return false; + } + + void acquireLock() CDS_NOEXCEPT_( noexcept( backoff_strategy()() )) + { + // TATAS algorithm + backoff_strategy bkoff; + while ( !tryAcquireLock() ) { + while ( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) ) + bkoff(); + } + } + //@endcond + + public: + /// Default constructor initializes spin to free (unlocked) state + ReentrantSpinT() CDS_NOEXCEPT + : m_spin(0) + , m_OwnerId( OS::nullThreadId() ) + {} + + /// Dummy copy constructor + /** + In theory, spin-lock cannot be copied. However, it is not practical. + Therefore, we provide dummy copy constructor that do no copy in fact. The ctor + initializes the spin to free (unlocked) state like default ctor. + */ + ReentrantSpinT(const ReentrantSpinT& ) CDS_NOEXCEPT + : m_spin(0) + , m_OwnerId( OS::nullThreadId() ) + {} + + /// Construct object for specified state + ReentrantSpinT(bool bLocked) CDS_NOEXCEPT + : m_spin(0), + m_OwnerId( OS::nullThreadId() ) + { + if ( bLocked ) + lock(); + } + + /// Checks if the spin is locked + /** + The spin is locked if lock count > 0 and the current thread is not an owner of the lock. + Otherwise (i.e. lock count == 0 or the curren thread owns the spin) the spin is unlocked. + */ + bool is_locked() const CDS_NOEXCEPT + { + return !( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) == 0 || isOwned( cds::OS::getCurrentThreadId() )); + } + + /// Try to lock the spin-lock (synonym for \ref try_lock) + bool tryLock() CDS_NOEXCEPT + { + thread_id tid = OS::getCurrentThreadId(); + if ( tryLockOwned( tid ) ) + return true; + if ( tryAcquireLock()) { + beOwner( tid ); + return true; + } + return false; + } + + /// Try to lock the spin-lock. If spin-lock is free the current thread owns it. Return @p true if locking is success + bool try_lock() CDS_NOEXCEPT + { + return tryLock(); + } + + /// Try to lock the object (synonym for \ref try_lock) + bool tryLock( unsigned int nTryCount ) +# if !( (CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION >= 40600 && CDS_COMPILER_VERSION < 40700) || (CDS_COMPILER == CDS_COMPILER_CLANG && CDS_COMPILER_VERSION < 30100) ) + // GCC 4.6, clang 3.0 error in noexcept expression: + // cannot call member function ‘bool cds::lock::ReentrantSpinT::tryAcquireLock(unsigned int) without object + CDS_NOEXCEPT_( noexcept( tryAcquireLock(nTryCount) )) +# endif + { + thread_id tid = OS::getCurrentThreadId(); + if ( tryLockOwned( tid ) ) + return true; + if ( tryAcquireLock( nTryCount )) { + beOwner( tid ); + return true; + } + return false; + } + + /// Try to lock the object. 
+ /** + If the spin-lock is locked the method repeats attempts to own spin-lock up to @p nTryCount times. + Between attempts @p backoff() is called. + Return @p true if current thread owns the lock @p false otherwise + */ + bool try_lock( unsigned int nTryCount ) +# if !( (CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION >= 40600 && CDS_COMPILER_VERSION < 40700) || (CDS_COMPILER == CDS_COMPILER_CLANG && CDS_COMPILER_VERSION < 30100) ) + // GCC 4.6, clang 3.0 error in noexcept expression: + // cannot call member function ‘bool cds::lock::ReentrantSpinT::tryLock(unsigned int) without object + CDS_NOEXCEPT_( noexcept( tryLock(nTryCount) )) +# endif + { + return tryLock( nTryCount ); + } + + /// Lock the object waits if it is busy + void lock() CDS_NOEXCEPT + { + thread_id tid = OS::getCurrentThreadId(); + if ( !tryLockOwned( tid ) ) { + acquireLock(); + beOwner( tid ); + } + } + + /// Unlock the spin-lock. Return @p true if the current thread is owner of spin-lock @p false otherwise + bool unlock() CDS_NOEXCEPT + { + if ( isOwned( OS::getCurrentThreadId() ) ) { + integral_type n = m_spin.load( CDS_ATOMIC::memory_order_relaxed ); + if ( n > 1 ) + m_spin.store( n - 1, CDS_ATOMIC::memory_order_relaxed ); + else { + free(); + m_spin.store( 0, CDS_ATOMIC::memory_order_release ); + } + return true; + } + return false; + } + + /// Change the owner of locked spin-lock. May be called by thread that is owner of the spin-lock + bool changeOwner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT + { + if ( isOwned( OS::getCurrentThreadId() ) ) { + assert( newOwnerId != OS::nullThreadId() ); + m_OwnerId = newOwnerId; + return true; + } + return false; + } + }; + + /// Recursive spin-lock based on atomic32u_t + typedef ReentrantSpinT ReentrantSpin32; + + /// Recursive spin-lock based on atomic64u_t type + typedef ReentrantSpinT ReentrantSpin64; + + /// Recursive spin-lock based on atomic32_t type + typedef ReentrantSpin32 ReentrantSpin; + + /// The best (for the current platform) auto spin-lock + typedef scoped_lock AutoSpin; + + } // namespace lock + + /// Standard (best for the current platform) spin-lock implementation + typedef lock::Spin SpinLock; + + /// Standard (best for the current platform) recursive spin-lock implementation + typedef lock::ReentrantSpin RecursiveSpinLock; + + /// 32bit recursive spin-lock shortcut + typedef lock::ReentrantSpin32 RecursiveSpinLock32; + + /// 64bit recursive spin-lock shortcut + typedef lock::ReentrantSpin64 RecursiveSpinLock64; + + /// Auto spin-lock shortcut + typedef lock::AutoSpin AutoSpinLock; + +} // namespace cds + +#endif // #ifndef __CDS_LOCK_SPINLOCK_H diff --git a/cds/memory/michael/allocator.h b/cds/memory/michael/allocator.h new file mode 100644 index 00000000..f571ed10 --- /dev/null +++ b/cds/memory/michael/allocator.h @@ -0,0 +1,1917 @@ +//$$CDS-header$$ + +#ifndef __CDS_MEMORY_MICHAEL_ALLOCATOR_TMPL_H +#define __CDS_MEMORY_MICHAEL_ALLOCATOR_TMPL_H + +/* + Michael allocator implementation + Source: + [2004] Maged Michael "Scalable Lock-Free Dynamic Memory Allocation" + + Editions: + 2011.09.07 khizmax Optimization: small page (about 64K) is allocated by Heap::alloc call. + This optimization allows to allocate system memory more regularly, + in blocks of 1M that leads to less memory fragmentation. + 2011.01.02 khizmax Created +*/ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace cds { + /// Memory-related algorithms: allocators etc. 
+namespace memory { + /// Michael's allocator (class Heap) + /** + \par Source + \li [2004] M.Michael "Scalable Lock-free Dynamic Memory Allocation" + + This namespace declares the main class Heap and a lot of helper classes. + */ +namespace michael { + + /// Size class + struct size_class { + unsigned int nBlockSize ; ///< block size in bytes + unsigned int nSBSize ; ///< superblock size (64K or 1M) + unsigned int nCapacity ; ///< superblock capacity (nSBSize / nBlockSize) + unsigned int nSBSizeIdx ; ///< internal superblock size index (page index) + }; + + /// %Heap based on system \p malloc and \p free functions + struct malloc_heap + { + /// Allocates memory block of \p nSize bytes (\p malloc wrapper) + static void * alloc( size_t nSize ) + { + return ::malloc( nSize ); + } + /// Returning memory block to the system (\p free wrapper) + static void free( void * p ) + { + ::free( p ); + } + }; + + /// %Heap based on system provided aligned \p malloc and \p free functions + struct aligned_malloc_heap + { + /// Allocates aligned memory block of \p nSize bytes with alignment \p nAlignment + static void * alloc( size_t nSize, size_t nAlignment ) + { + return cds::OS::aligned_malloc( nSize, nAlignment ); + } + /// Frees aligned memory block \p p that has been previosly allocated by \ref alloc call + static void free( void * p ) + { + cds::OS::aligned_free( p ); + } + }; + + /// Page heap based on \p Heap + /** + Page heap can allocate memory by page-sized block only. + \p Heap may be any heap that provides interface like \ref malloc_heap. + + This class is one of available implementation of opt::page_heap option. + */ + template + class page_allocator: public Heap + { + //@cond + typedef Heap base_class; + size_t m_nPageSize; + //@endcond + + public: + /// Initializes heap + page_allocator( + size_t nPageSize ///< page size in bytes + ) + : m_nPageSize( nPageSize ) + {} + + /// Allocate new page + void * alloc() + { + return base_class::alloc( m_nPageSize ); + } + + /// Free page \p pPage + void free( void * pPage ) + { + base_class::free( pPage ); + } + }; + + /// Page cacheable heap + /** + To improve performance this allocator maintains small list of free pages. + Page heap can allocate memory by page-sized block only. + + Template parameters: + \li \p FreeListCapacity - capacity of free-list, default value is 64 page + \li \p Heap may be any heap that provides interface like \ref malloc_heap. + + This class is one of available implementation of opt::page_heap option. 
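        A minimal sketch (illustration only, assuming the interface described above) of the caching behaviour:
        a page returned by \p free() is kept in the free-list and may be handed out again by the next \p alloc():
        \code
        #include <cds/memory/michael/allocator.h>

        void page_cache_demo()
        {
            // 64K pages, default free-list capacity
            cds::memory::michael::page_cached_allocator<> pageHeap( 64 * 1024 );

            void * pPage = pageHeap.alloc();    // allocated from the underlying page heap
            pageHeap.free( pPage );             // cached in the internal free-list
            void * pAgain = pageHeap.alloc();   // typically the cached page is reused here
            pageHeap.free( pAgain );
        }
        \endcode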
+ */ + template + class page_cached_allocator: public page_allocator + { + //@cond + typedef page_allocator base_class; + +#ifdef _DEBUG + struct make_null_ptr { + void operator ()(void *& p) + { + p = null_ptr(); + } + }; +#endif + + typedef container::VyukovMPMCCycleQueue< + void *, + opt::buffer< opt::v::static_buffer > +#ifdef _DEBUG + , opt::value_cleaner< make_null_ptr > +#endif + > free_list; + + free_list m_FreeList; + //@endcond + + public: + /// Initializes heap + page_cached_allocator( + size_t nPageSize ///< page size in bytes + ) + : base_class( nPageSize ) + , m_FreeList( FreeListCapacity ) + {} + + //@cond + ~page_cached_allocator() + { + void * pPage; + while ( m_FreeList.pop(pPage) ) + base_class::free( pPage ); + } + //@endcond + + /// Allocate new page + void * alloc() + { + void * pPage; + if ( !m_FreeList.pop( pPage ) ) + pPage = base_class::alloc(); + return pPage; + } + + /// Free page \p pPage + void free( void * pPage ) + { + if ( !m_FreeList.push( pPage )) + base_class::free( pPage ); + } + }; + + /// Implementation of opt::sizeclass_selector option + /** + Default size-class selector can manage memory blocks up to 64K. + */ + class CDS_EXPORT_API default_sizeclass_selector + { + //@cond + /// Count of different size-classes + static const size_t c_nSizeClassCount = 63; + + /// Max block size + static const size_t c_nMaxBlockSize = 64 * 1024; + + /// Page size of type 0 (64K) + static const unsigned int c_nPage64K = 64 * 1024 - 32; + + /// Page size of type 1 (1M) + static const unsigned int c_nPage1M = 1024 * 1024; + + static CDS_DATA_ALIGNMENT(128) unsigned int const m_szClassBounds[c_nSizeClassCount]; + static size_class const m_szClass[c_nSizeClassCount]; + static unsigned char const m_szClassMap[]; + //@endcond + public: + /// Type of size-class index + typedef unsigned int sizeclass_index; + +#ifdef _DEBUG + default_sizeclass_selector(); +#endif + + /// "No size class" index + static const sizeclass_index c_nNoSizeClass = (unsigned int) (0 - 1); + + /// Returns size-class count + static sizeclass_index size() + { + return c_nSizeClassCount; + } + + /// Returns page size in bytes for given page type \p nPageType + static size_t page_size(size_t nPageType ) + { + switch (nPageType) { + case 0: + return c_nPage64K; + case 1: + return c_nPage1M; + default: + assert(false) ; // anything forgotten?.. + } + return c_nPage1M; + } + + /// Returns count of page size-class + /** + This class supports pages of two types: 64K page for small objects and 1M page for other objects. + */ + static size_t pageTypeCount() + { + return 2; + } + + /// Returns size-class index for \p nSize + /** + For large blocks that cannot be allocated by Michael's allocator + the function must return -1. 
+ */ + static sizeclass_index find( size_t nSize ) + { + if ( nSize > c_nMaxBlockSize ) { + // Too large block - allocate from system + return c_nNoSizeClass; + } + sizeclass_index szClass = m_szClassMap[ (nSize + 15) / 16 ]; + assert( nSize <= m_szClassBounds[ szClass ] ); + assert( szClass == 0 || m_szClassBounds[ szClass - 1] < nSize ); + + return szClass; + } + + /// Gets details::size_class struct for size-class index \p nIndex + static const size_class * at( sizeclass_index nIndex ) + { + assert( nIndex < size() ); + return m_szClass + nIndex; + } + }; + + //@cond + namespace details { + struct free_list_tag; + typedef boost::intrusive::list_base_hook< boost::intrusive::tag< free_list_tag > > free_list_locked_hook; + + struct partial_list_tag; + typedef boost::intrusive::list_base_hook< boost::intrusive::tag< partial_list_tag > > partial_list_locked_hook; + + struct intrusive_superblock_desc: public free_list_locked_hook, partial_list_locked_hook + {}; + } + //@endcond + + /// List of free superblock descriptor + /** + This class is a implementation of \ref opt::free_list option + */ + template + class free_list_locked: public boost::intrusive::list > + { + //@cond + typedef boost::intrusive::list > base_class; + public: + typedef details::free_list_locked_hook item_hook; + typedef Lock lock_type; + protected: + typedef cds::lock::scoped_lock auto_lock; + + mutable lock_type m_access; + //@endcond + + public: + /// Rebinds to other item type \p T2 + template + struct rebind { + typedef free_list_locked other ; ///< rebind result + }; + + public: + /// Push superblock descriptor to free-list + void push( T * pDesc ) + { + assert( base_class::node_algorithms::inited( static_cast(pDesc) ) ); + auto_lock al(m_access); + base_class::push_back( *pDesc ); + } + + /// Pop superblock descriptor from free-list + T * pop() + { + auto_lock al(m_access); + if ( base_class::empty() ) + return null_ptr(); + T& rDesc = base_class::front(); + base_class::pop_front(); + assert( base_class::node_algorithms::inited( static_cast(&rDesc) ) ); + return &rDesc; + } + + /// Returns current count of superblocks in free-list + size_t size() const + { + auto_lock al(m_access); + return base_class::size(); + } + }; + + /// List of partial filled superblock descriptor + /** + This class is a implementation of \ref opt::partial_list option + */ + template + class partial_list_locked: public boost::intrusive::list > + { + //@cond + typedef boost::intrusive::list > base_class; + public: + typedef details::partial_list_locked_hook item_hook; + typedef Lock lock_type; + protected: + typedef cds::lock::scoped_lock auto_lock; + + mutable lock_type m_access; + //@endcond + + public: + /// Rebinds to other item type \p T2 + template + struct rebind { + typedef partial_list_locked other ; ///< rebind result + }; + + public: + /// Push a superblock \p pDesc to the list + void push( T * pDesc ) + { + auto_lock al( m_access ); + assert( base_class::node_algorithms::inited( static_cast(pDesc) ) ); + base_class::push_back( *pDesc ); + } + + /// Pop superblock from the list + T * pop() + { + auto_lock al( m_access ); + if ( base_class::empty() ) + return null_ptr(); + T& rDesc = base_class::front(); + base_class::pop_front(); + assert( base_class::node_algorithms::inited( static_cast(&rDesc) ) ); + return &rDesc; + } + + /// Removes \p pDesc descriptor from the free-list + bool unlink( T * pDesc ) + { + assert( pDesc != null_ptr() ); + auto_lock al( m_access ); + // !inited(pDesc) is equal to "pDesc is being linked to partial 
list" + if ( !base_class::node_algorithms::inited( static_cast(pDesc) ) ) { + base_class::erase( base_class::iterator_to( *pDesc ) ); + return true; + } + return false; + } + + /// Count of element in the list + size_t size() const + { + auto_lock al( m_access ); + return base_class::size(); + } + }; + + /// Summary processor heap statistics + /** + Summary heap statistics for use with Heap::summaryStat function. + */ + struct summary_stat + { + size_t nAllocFromActive ; ///< Event count of allocation from active superblock + size_t nAllocFromPartial ; ///< Event count of allocation from partial superblock + size_t nAllocFromNew ; ///< Event count of allocation from new superblock + size_t nFreeCount ; ///< Count of \p free function call + size_t nPageAllocCount ; ///< Count of page (superblock) allocated + size_t nPageDeallocCount ; ///< Count of page (superblock) deallocated + size_t nDescAllocCount ; ///< Count of superblock descriptors + size_t nDescFull ; ///< Count of full superblock + atomic64u_t nBytesAllocated ; ///< Count of allocated bytes (for heap managed memory blocks) + atomic64u_t nBytesDeallocated ; ///< Count of deallocated bytes (for heap managed memory blocks) + + size_t nSysAllocCount ; ///< Count of \p alloc and \p alloc_aligned function call (for large memory blocks that allocated directly from OS) + size_t nSysFreeCount ; ///< Count of \p free and \p free_aligned function call (for large memory blocks that allocated directly from OS) + atomic64u_t nSysBytesAllocated ; ///< Count of allocated bytes (for large memory blocks that allocated directly from OS) + atomic64_t nSysBytesDeallocated; ///< Count of deallocated bytes (for large memory blocks that allocated directly from OS) + + // Internal contention indicators + /// CAS failure counter for updating active field of active block of \p alloc_from_active Heap internal function + /** + Contention indicator. The less value is better + */ + size_t nActiveDescCASFailureCount; + /// CAS failure counter for updating active field of active block of \p alloc_from_active Heap internal function + /** + Contention indicator. The less value is better + */ + size_t nActiveAnchorCASFailureCount; + /// CAS failure counter for updating anchor field of partial block of \p alloc_from_partial Heap internal function + /** + Contention indicator. The less value is better + */ + size_t nPartialDescCASFailureCount; + /// CAS failure counter for updating anchor field of partial block of \p alloc_from_partial Heap internal function + /** + Contention indicator. The less value is better + */ + size_t nPartialAnchorCASFailureCount; + + + public: + /// Constructs empty statistics. All counters are zero. + summary_stat() + { + clear(); + } + + /// Difference statistics + /** + This operator computes difference between \p *this and \p stat and places the difference to \p this. 
+ Returns \p *this; + */ + summary_stat& operator -=( const summary_stat& stat ) + { + nAllocFromActive -= stat.nAllocFromActive; + nAllocFromPartial -= stat.nAllocFromPartial; + nAllocFromNew -= stat.nAllocFromNew; + nFreeCount -= stat.nFreeCount; + nPageAllocCount -= stat.nPageAllocCount; + nPageDeallocCount -= stat.nPageDeallocCount; + nDescAllocCount -= stat.nDescAllocCount; + nDescFull -= stat.nDescFull; + nBytesAllocated -= stat.nBytesAllocated; + nBytesDeallocated -= stat.nBytesDeallocated; + + nSysAllocCount -= stat.nSysAllocCount; + nSysFreeCount -= stat.nSysFreeCount; + nSysBytesAllocated -= stat.nSysBytesAllocated; + nSysBytesDeallocated -= stat.nSysBytesDeallocated; + + nActiveDescCASFailureCount -= stat.nActiveDescCASFailureCount; + nActiveAnchorCASFailureCount -= stat.nActiveAnchorCASFailureCount; + nPartialDescCASFailureCount -= stat.nPartialDescCASFailureCount; + nPartialAnchorCASFailureCount -= stat.nPartialAnchorCASFailureCount; + + return *this; + } + + /// Clears statistics + /** + All counters are set to zero. + */ + void clear() + { + memset( this, 0, sizeof(*this)); + } + + //@cond + template + summary_stat& add_procheap_stat( const Stat& stat ) + { + nAllocFromActive += stat.allocFromActive(); + nAllocFromPartial += stat.allocFromPartial(); + nAllocFromNew += stat.allocFromNew(); + nFreeCount += stat.freeCount(); + nPageAllocCount += stat.blockAllocated(); + nPageDeallocCount += stat.blockDeallocated(); + nDescAllocCount += stat.descAllocCount(); + nDescFull += stat.descFull(); + nBytesAllocated += stat.allocatedBytes(); + nBytesDeallocated += stat.deallocatedBytes(); + + nActiveDescCASFailureCount += stat.activeDescCASFailureCount(); + nActiveAnchorCASFailureCount += stat.activeAnchorCASFailureCount(); + nPartialDescCASFailureCount += stat.partialDescCASFailureCount(); + nPartialAnchorCASFailureCount += stat.partialAnchorCASFailureCount(); + + return *this; + } + + template + summary_stat& add_heap_stat( const Stat& stat ) + { + nSysAllocCount += stat.allocCount(); + nSysFreeCount += stat.freeCount(); + + nSysBytesAllocated += stat.allocatedBytes(); + nSysBytesDeallocated+= stat.deallocatedBytes(); + + return *this; + } + //@endcond + }; + + /// Michael's allocator + /** + This class provides base functionality for Michael's allocator. It does not provide + the interface described by \p std::allocator, therefore, we name it as a heap, not as an allocator. + The heap interface is closer to semantics of \p malloc / \p free system functions. + The heap supports allocation of aligned and unaligned data. + + The algorithm is based on simplified version of + \li [2004] M.Michael "Scalable Lock-free Dynamic Memory Allocation" + + that, in turn, is concurrent version of well-known Hoard allocator developed by Emery Berger, see + \li [2002] Emery Berger "Memory Management for High-Performance Application", PhD thesis + + This is powerful, scalable, fully customizable heap with fast-path without any locks + that has been developed specifically for multi-threading. + With opt:sys_topology you can set as many allocation arena ("processor heap") as you need. + You can manually bound any your thread to any arena ("processor"). With opt::sizeclass_selector option you can manage + allocation granularity. With opt::page_heap you can utilize any OS-provided features for page allocation + like \p mmap, \p VirtualAlloc etc. The heap can gather internal statistics that helps you to tune your application. + The opt::check_bounds feature can help you to find a memory buffer overflow. 
+ + Brief algorithm description from Michael's work: + + Large blocks (greater than 64K) are allocated directly from the OS and freed directly to the OS. For smaller block sizes, + the heap is composed of large superblocks (64 KB or 1MB size). Each superblock is divided into multiple equal-sized blocks. + Superblocks are distributed among size classes based on their block sizes. Each size class contains multiple processor + heaps proportional to the number of processors in the system. A processor heap contains at most one active superblock. + An active superblock contains one or more blocks available for reservation that are guaranteed to be available to threads + that reach them through the header of the processor heap. Each superblock is associated with a descriptor. Each allocated + block contains a prefix (8 bytes) that points to the descriptor of its superblock. On the first call to malloc, the static + structures for the size classes and processor heaps (about 16 KB for a 16 processor machine) are allocated and initialized + in a lock-free manner. + + Malloc starts by identifying the appropriate processor heap, based on the requested block size and the identity of + the calling thread. Typically, the heap already has an active superblock with blocks available for reservation. The thread + atomically reads a pointer to the descriptor of the active superblock and reserves a block. Next, the thread atomically + pops a block from that superblock and updates its descriptor. A typical free pushes the freed block into the list of + available blocks of its original superblock by atomically updating its descriptor. + + Constraint: one superblock may contain up to 2048 block. This restriction imposes a restriction on the maximum + superblock size. + + Available \p Options: + - \ref opt::sys_topology - class that describes system topology needed for allocator. + Default is \p cds::OS::topology (see cds::OS::Win32::topology for interface description) + - \ref opt::system_heap - option setter for an allocator for large blocks that is used for direct allocation from OS. + Default is \ref malloc_heap. + - \ref opt::aligned_heap - option setter for a heap used for internal aligned memory management. + Default is \ref aligned_malloc_heap + - \ref opt::page_heap - option setter for a heap used for page (superblock) allocation of 64K/1M size. + Default is \ref page_cached_allocator + - \ref opt::sizeclass_selector - option setter for a class used to select appropriate size-class + for incoming allocation request. + Default is \ref default_sizeclass_selector + - \ref opt::free_list - option setter for a class to manage a list of free superblock descriptors + Default is \ref free_list_locked + - \ref opt::partial_list - option setter for a class to manage a list of partial filled superblocks + Default is \ref partial_list_locked + - \ref opt::procheap_stat - option setter for a class to gather internal statistics for memory allocation + that is maintained by the heap. + Default is \ref procheap_empty_stat + - \ref opt::os_allocated_stat - option setter for a class to gather internal statistics for large block + allocation. Term "large block" is specified by the size-class selector (see \ref opt::sizeclass_selector) + and it is 64K for \ref default_sizeclass_selector. Any block that is large that 64K is allocated from + OS directly. \p os_allocated_stat option is set a class to gather statistics for large blocks. + Default is \ref os_allocated_empty + - \ref opt::check_bounds - a bound checker. 
+ Default is no bound checker (cds::opt::none) + + \par Usage: + The heap is the basic building block for your allocator or operator new implementation. + + \code + #include + + // Heap with explicitly defined options: + cds::memory::michael::Heap< + opt::aligned_heap< aligned_malloc_heap >, + opt::page_heap< page_cached_allocator<16, malloc_heap> > + > myHeap; + + // Heap with default options: + cds::memory::michael::Heap<> myDefHeap; + \endcode + + \par How to make std-like allocator + + There are serious differencies of heap and std::allocator interface: + - Heap is stateful, and \p std::allocator is stateless. + - Heap has much more template parameters than \p std::allocator + - Heap has low-level interface for memory allocating only unlike the allocator + interface that can construct/destroy objects of any type T. + + To convert heap interface into \p std::allocator -like interface you should: + - Declare object of class cds::memory::michael::Heap specifying the necessary + template parameters; this is usually static object + - Create a class with \p std::allocator interface that uses the function of heap. + \code + #include + + template + class MichaelAllocator + { + typedef std::allocator std_allocator; + typedef cds::memory::michael::Heap<> michael_heap; + + // Michael heap static object + static michael_heap s_Heap; + public: + // Declare typedefs from std::allocator + typedef typename std_allocator::const_pointer const_pointer; + typedef typename std_allocator::pointer pointer; + typedef typename std_allocator::const_reference const_reference; + typedef typename std_allocator::reference reference; + typedef typename std_allocator::difference_type difference_type; + typedef typename std_allocator::size_type size_type; + typedef typename std_allocator::value_type value_type; + + // Allocation function + pointer allocate( size_type _Count, const void* _Hint ) + { + return reinterpret_cast( s_Heap.alloc( sizeof(T) * _Count )); + } + + // Deallocation function + void deallocate( pointer _Ptr, size_type _Count ) + { + s_Heap.free( _Ptr ); + } + + // Other std::allocator specific functions: address, construct, destroy, etc. + ... 
+ + // Rebinding allocator to other type + template + struct rebind { + typedef MichaelAllocator<_Other> other; + }; + }; + + // In .cpp file: + MichaelAllocator::michael_heap MichaelAllocator::s_Heap; + + \endcode + */ +#ifdef CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT + template +#else + template < + typename O1 = opt::none, + typename O2 = opt::none, + typename O3 = opt::none, + typename O4 = opt::none, + typename O5 = opt::none, + typename O6 = opt::none, + typename O7 = opt::none, + typename O8 = opt::none, + typename O9 = opt::none, + typename O10= opt::none + > +#endif + class Heap { + protected: + + //@cond + static const unsigned int c_nAlignment = cds::c_nCacheLineSize; + static const unsigned int c_nDefaultBlockAlignment = 8; + + struct default_options { + typedef cds::OS::topology sys_topology; + typedef malloc_heap system_heap; + typedef page_cached_allocator<> page_heap; + typedef aligned_malloc_heap aligned_heap; + typedef default_sizeclass_selector sizeclass_selector; + typedef free_list_locked free_list; + typedef partial_list_locked partial_list; + typedef procheap_empty_stat procheap_stat; + typedef os_allocated_empty os_allocated_stat; + typedef cds::opt::none check_bounds; + }; + //@endcond + + protected: + //@cond +#ifdef CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT + typedef typename opt::make_options::type options; +#else + typedef typename opt::make_options::type options; +#endif + //@endcond + + //@cond + typedef unsigned char byte; + //@endcond + public: + typedef typename options::sys_topology sys_topology ; ///< effective system topology + typedef typename options::system_heap system_heap ; ///< effective system heap + typedef typename options::aligned_heap aligned_heap ; ///< effective aligned heap + typedef typename options::sizeclass_selector sizeclass_selector ; ///< effective sizeclass selector + typedef typename options::page_heap page_heap ; ///< effective page heap + typedef typename options::procheap_stat procheap_stat ; ///< effective processor heap statistics + typedef typename options::os_allocated_stat os_allocated_stat ; ///< effective OS-allocated memory statistics + typedef details::bound_checker_selector< typename options::check_bounds > bound_checker ; ///< effective bound checker + + // forward declarations + //@cond + struct superblock_desc; + struct processor_heap_base; + struct processor_desc; + //@endcond + + /// Superblock states + /** + A superblock can be in one of four states: \p ACTIVE, \p FULL, + \p PARTIAL, or \p EMPTY. A superblock is \p ACTIVE if it is the active + superblock in a heap, or if a thread intends to try to install it + as such. A superblock is \p FULL if all its blocks are either allocated + or reserved. A superblock is \p PARTIAL if it is not \p ACTIVE + and contains unreserved available blocks. A superblock is + \p EMPTY if all its blocks are free and it is not \p ACTIVE. + */ + enum superblock_state { + SBSTATE_ACTIVE = 0, ///< superblock is active + SBSTATE_FULL = 1, ///< superblock is full + SBSTATE_PARTIAL = 2, ///< superblock is partially allocated + SBSTATE_EMPTY = 3 ///< superblock is empty and may be freed + }; + + static const size_t c_nMaxBlockInSuperBlock = 1024 * 2 ; ///< Max count of blocks in superblock (2 ** 11) + + /// Anchor of the superblock descriptor. 
Updated by CAS + struct anchor_tag { + unsigned long long avail:11 ; ///< index of first available block in the superblock + unsigned long long count:11 ; ///< number of unreserved blocks in the superblock + unsigned long long state: 2 ; ///< state of the superblock (see \ref superblock_state enum) + unsigned long long tag:40 ; ///< ABA prevention tag + }; + + /// Superblock descriptor + struct superblock_desc + : public options::free_list::item_hook + , public options::partial_list::item_hook + { + CDS_ATOMIC::atomic anchor ; ///< anchor, see \ref anchor_tag + byte * pSB ; ///< ptr to superblock + processor_heap_base * pProcHeap ; ///< pointer to owner processor heap + unsigned int nBlockSize ; ///< block size in bytes + unsigned int nCapacity ; ///< superblock size/block size + + //@cond + superblock_desc() + : pSB( null_ptr() ) + , pProcHeap( null_ptr() ) + {} + //@endcond + }; + + //@cond + typedef typename options::free_list::template rebind::other free_list; + typedef typename options::partial_list::template rebind::other partial_list; + //@endcond + +#if CDS_BUILD_BITS == 32 + /// Allocated block header + /** + Each allocated block has 8-byte header. + The header contains pointer to owner superblock descriptor and the redirection flag. + If the block has been allocated by \ref alloc, then the redirection flag is 0 and the block's structure is: + \code + +---------------+ + | blockHeader | [8 byte] pointer to owner superblock (flag=0) + +---------------+ + | | <- memory allocated + | memory | + | | + +---------------+ + \endcode + If the block has been allocated by \ref alloc_aligned, then it is possible that pointer returned must be aligned. + In this case the redirection flag is 1 and the block's structure is: + \code + +---------------+ + +-> | blockHeader | [8 byte] pointer to owner superblock (flag=0) + | +---------------+ + | | padding | + | | (unused) | + | | | + | +---------------+ + +-- | blockHeader | [8 byte] pointer to block head (flag=1) + +---------------+ + | | <- memory allocated + | memory | + | | + +---------------+ + \endcode + */ + class block_header + { + //@cond + enum { + bitAligned = 1, + bitOSAllocated = 2 + }; + + union { + superblock_desc * pDesc ; // pointer to superblock descriptor + atomic32u_t nSize ; // block size (allocated form OS) + }; + atomic32u_t nFlags; + + public: + void set( superblock_desc * pdesc, atomic32u_t isAligned ) + { + pDesc = pdesc; + nFlags = isAligned ? bitAligned : 0; + } + + superblock_desc * desc() + { + assert( (nFlags & bitOSAllocated) == 0 ); + return (nFlags & bitAligned) ? reinterpret_cast( pDesc )->desc() : pDesc; + } + + block_header * begin() + { + return (nFlags & bitAligned) ? 
reinterpret_cast(pDesc) : this; + } + + bool isAligned() const + { + return (nFlags & bitAligned) != 0; + } + + bool isOSAllocated() const + { + return (nFlags & bitOSAllocated) != 0; + } + + void setOSAllocated( size_t sz ) + { + nSize = sz; + nFlags = bitOSAllocated; + } + + size_t getOSAllocSize() const + { + assert( isOSAllocated() ); + return nSize; + } + + //@endcond + }; +#elif CDS_BUILD_BITS == 64 + //@cond + class block_header + { + enum { + bitAligned = 1, + bitOSAllocated = 2 + }; + typedef cds::details::marked_ptr marked_desc_ptr; + // If bitOSAllocated is set the pDesc contains size of memory block + // allocated from OS + marked_desc_ptr pDesc; + public: + void set( superblock_desc * pdesc, atomic32u_t isAligned ) + { + pDesc = marked_desc_ptr( pdesc, isAligned ); + } + + superblock_desc * desc() + { + assert( !isOSAllocated() ); + return (pDesc.bits() & bitAligned) ? reinterpret_cast( pDesc.ptr() )->desc() : pDesc.ptr(); + } + + block_header * begin() + { + return (pDesc.bits() & bitAligned) ? reinterpret_cast( pDesc.ptr() ) : this; + } + + bool isAligned() const + { + return (pDesc.bits() & bitAligned) != 0; + } + + bool isOSAllocated() const + { + return (pDesc.bits() & bitOSAllocated) != 0; + } + + void setOSAllocated( size_t nSize ) + { + + pDesc = marked_desc_ptr( reinterpret_cast(nSize << 2), bitOSAllocated ); + } + + size_t getOSAllocSize() const + { + assert( isOSAllocated() ); + return reinterpret_cast( pDesc.ptr() ) >> 2; + } + + }; + //@endcond +#else +# error "Unexpected value of CDS_BUILD_BITS" +#endif // CDS_BUILD_BITS + + //@cond + struct free_block_header: block_header { + unsigned int nNextFree; + }; + //@endcond + +#if CDS_BUILD_BITS == 32 + /// Processor heap's \p active field + /** + The \p active field in the processor heap structure is primarily a pointer to the descriptor + of the active superblock owned by the processor heap. If the value of \p active is not \p NULL, it is + guaranteed that the active superblock has at least one block available for reservation. + Since the addresses of superblock descriptors can be guaranteed to be aligned to some power + of 2 (e.g., 64), as an optimization, we can carve a credits subfield to hold the number + of blocks available for reservation in the active superblock less one. That is, if the value + of credits is n, then the active superblock contains n+1 blocks available for reservation + through the \p active field. Note that the number of blocks in a superblock is not limited + to the maximum reservations that can be held in the credits subfield. In a typical malloc operation + (i.e., when \p active != \p NULL and \p credits > 0), the thread reads \p active and then + atomically decrements credits while validating that the active superblock is still valid. 
+ */ + class active_tag { + //@cond + superblock_desc * pDesc; + atomic32u_t nCredits; + + public: + static const unsigned int c_nMaxCredits = 0 - 1; + + public: + CDS_CONSTEXPR active_tag() CDS_NOEXCEPT + : pDesc(null_ptr()) + , nCredits(0) + {} + +# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT + active_tag( active_tag const& ) CDS_NOEXCEPT_DEFAULTED = default; + ~active_tag() CDS_NOEXCEPT_DEFAULTED = default; + active_tag& operator=(active_tag const& ) CDS_NOEXCEPT_DEFAULTED = default; +# if defined(CDS_MOVE_SEMANTICS_SUPPORT) && !defined(CDS_DISABLE_DEFAULT_MOVE_CTOR) + active_tag( active_tag&& ) CDS_NOEXCEPT_DEFAULTED = default; + active_tag& operator=(active_tag&&) CDS_NOEXCEPT_DEFAULTED = default; +# endif +# endif + + /// Returns pointer to superblock descriptor + superblock_desc * ptr() const + { + return pDesc; + } + + /// Sets superblock descriptor + void ptr( superblock_desc * p ) + { + pDesc = p; + } + + unsigned int credits() const + { + return nCredits; + } + + void credits( unsigned int n ) + { + nCredits = n; + } + + void clear() + { + pDesc = null_ptr(); + nCredits = 0; + } + + void set( superblock_desc * pSB, unsigned int n ) + { + pDesc = pSB; + nCredits = n; + } + //@endcond + }; +#elif CDS_BUILD_BITS == 64 + //@cond + class active_tag + { + public: + static const unsigned int c_nMaxCredits = c_nAlignment - 1 ; // 0x003F; + protected: + typedef cds::details::marked_ptr marked_desc_ptr; + marked_desc_ptr pDesc; + + public: + active_tag() CDS_NOEXCEPT + : pDesc( null_ptr() ) + {} +# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT + // Clang 3.1: error: first argument to atomic operation must be a pointer to a trivially-copyable type + //active_tag() CDS_NOEXCEPT_DEFAULTED = default; + active_tag( active_tag const& ) CDS_NOEXCEPT_DEFAULTED = default; + ~active_tag() CDS_NOEXCEPT_DEFAULTED = default; + active_tag& operator=(active_tag const&) CDS_NOEXCEPT_DEFAULTED = default; +# if defined(CDS_MOVE_SEMANTICS_SUPPORT) && !defined(CDS_DISABLE_DEFAULT_MOVE_CTOR) + active_tag( active_tag&& ) CDS_NOEXCEPT_DEFAULTED = default; + active_tag& operator=(active_tag&&) CDS_NOEXCEPT_DEFAULTED = default; +# endif +# endif + superblock_desc * ptr() const + { + return pDesc.ptr(); + } + + void ptr( superblock_desc * p ) + { + assert( (reinterpret_cast(p) & c_nMaxCredits) == 0 ); + pDesc = marked_desc_ptr( p, pDesc.bits()); + } + + unsigned int credits() + { + return (unsigned int) pDesc.bits(); + } + + void credits( unsigned int n ) + { + assert( n <= c_nMaxCredits ); + pDesc = marked_desc_ptr( pDesc.ptr(), n ); + } + + void clear() + { + pDesc = marked_desc_ptr(); + } + + void set( superblock_desc * pSB, unsigned int n ) + { + assert( (reinterpret_cast(pSB) & c_nMaxCredits) == 0 ); + pDesc = marked_desc_ptr( pSB, n ); + } + + }; + //@endcond +#else +# error "Unexpected value of CDS_BUILD_BITS" +#endif // CDS_BUILD_BITS + + + /// Processor heap + struct processor_heap_base + { + CDS_DATA_ALIGNMENT(8) CDS_ATOMIC::atomic active; ///< pointer to the descriptor of active superblock owned by processor heap + processor_desc * pProcDesc ; ///< pointer to parent processor descriptor + const size_class * pSizeClass ; ///< pointer to size class + CDS_ATOMIC::atomic pPartial ; ///< pointer to partial filled superblock (may be NULL) + partial_list partialList ; ///< list of partial filled superblocks owned by the processor heap + unsigned int nPageIdx ; ///< page size-class index, \ref c_nPageSelfAllocation - "small page" + + /// Small page marker + /** + If page is small and can be 
allocated by the Heap, the \p nPageIdx value is \p c_nPageSelfAllocation. + This optimization allows to allocate system memory more regularly, in blocks of 1M that leads + to less memory fragmentation. + */ + static const unsigned int c_nPageSelfAllocation = (unsigned int) -1; + + procheap_stat stat ; ///< heap statistics + //processor_heap_statistics stat; + + //@cond + processor_heap_base() CDS_NOEXCEPT + : pProcDesc( null_ptr() ) + , pSizeClass( null_ptr() ) + , pPartial( null_ptr() ) + { + assert( (reinterpret_cast(this) & (c_nAlignment - 1)) == 0 ); + } + //@endcond + + /// Get partial superblock owned by the processor heap + superblock_desc * get_partial() + { + superblock_desc * pDesc = pPartial.load(CDS_ATOMIC::memory_order_acquire); + do { + if ( !pDesc ) { + pDesc = partialList.pop(); + break; + } + } while ( !pPartial.compare_exchange_weak( pDesc, null_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed)); + + //assert( pDesc == NULL || free_desc_list::node_algorithms::inited( static_cast(pDesc) )); + //assert( pDesc == NULL || partial_desc_list::node_algorithms::inited( static_cast(pDesc) ) ); + return pDesc; + } + + /// Add partial superblock \p pDesc to the list + void add_partial( superblock_desc * pDesc ) + { + assert( pPartial != pDesc ); + //assert( partial_desc_list::node_algorithms::inited( static_cast(pDesc) ) ); + + superblock_desc * pCur = null_ptr(); + if ( !pPartial.compare_exchange_strong(pCur, pDesc, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed) ) + partialList.push( pDesc ); + } + + + /// Remove superblock \p pDesc from the list of partial superblock + bool unlink_partial( superblock_desc * pDesc ) + { + return partialList.unlink( pDesc ); + } + }; + + /// Aligned superblock descriptor + typedef typename cds::details::type_padding::type processor_heap; + + /// Processor descriptor + struct processor_desc + { + processor_heap * arrProcHeap ; ///< array of processor heap + free_list listSBDescFree ; ///< List of free superblock descriptors + page_heap * pageHeaps ; ///< array of page heap (one for each page size) + + //@cond + processor_desc() + : arrProcHeap( null_ptr() ) + , pageHeaps( null_ptr() ) + {} + //@endcond + }; + + + protected: + sys_topology m_Topology ; ///< System topology + system_heap m_LargeHeap ; ///< Heap for large block + aligned_heap m_AlignedHeap ; ///< Internal aligned heap + sizeclass_selector m_SizeClassSelector ; ///< Size-class selector + CDS_ATOMIC::atomic * m_arrProcDesc ; ///< array of pointers to the processor descriptors + unsigned int m_nProcessorCount ; ///< Processor count + bound_checker m_BoundChecker ; ///< Bound checker + + os_allocated_stat m_OSAllocStat ; ///< OS-allocated memory statistics + + protected: + //@cond + + /// Allocates large block from system memory + block_header * alloc_from_OS( size_t nSize ) + { + block_header * p = reinterpret_cast( m_LargeHeap.alloc( nSize ) ); + m_OSAllocStat.incBytesAllocated( nSize ); + p->setOSAllocated( nSize ); + return p; + } + + /// Allocates from the active superblock if it possible + block_header * alloc_from_active( processor_heap * pProcHeap ) + { + active_tag oldActive; + int nCollision = -1; + + // Reserve block + while ( true ) { + ++nCollision; + oldActive = pProcHeap->active.load(CDS_ATOMIC::memory_order_acquire); + if ( !oldActive.ptr() ) + return null_ptr(); + unsigned int nCredits = oldActive.credits(); + active_tag newActive ; // default = 0 + if ( nCredits != 0 ) { + newActive = oldActive; + newActive.credits( nCredits - 
1 ); + } + if ( pProcHeap->active.compare_exchange_strong( oldActive, newActive, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + break; + } + + if ( nCollision ) + pProcHeap->stat.incActiveDescCASFailureCount( nCollision ); + + // pop block + superblock_desc * pDesc = oldActive.ptr(); + + anchor_tag oldAnchor; + anchor_tag newAnchor; + byte * pAddr; + unsigned int nMoreCredits = 0; + + nCollision = -1; + do { + ++nCollision; + newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire); + + assert( oldAnchor.avail < pDesc->nCapacity ); + pAddr = pDesc->pSB + oldAnchor.avail * (unsigned long long) pDesc->nBlockSize; + newAnchor.avail = reinterpret_cast( pAddr )->nNextFree; + newAnchor.tag += 1; + + if ( oldActive.credits() == 0 ) { + // state must be ACTIVE + if ( oldAnchor.count == 0 ) + newAnchor.state = SBSTATE_FULL; + else { + nMoreCredits = oldAnchor.count < active_tag::c_nMaxCredits ? ((unsigned int) oldAnchor.count) : active_tag::c_nMaxCredits; + newAnchor.count -= nMoreCredits; + } + } + } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + + if ( nCollision ) + pProcHeap->stat.incActiveAnchorCASFailureCount( nCollision ); + + assert( newAnchor.state != SBSTATE_EMPTY ); + + if ( newAnchor.state == SBSTATE_FULL ) + pProcHeap->stat.incDescFull(); + if ( oldActive.credits() == 0 && oldAnchor.count > 0 ) + update_active( pProcHeap, pDesc, nMoreCredits ); + + pProcHeap->stat.incAllocFromActive(); + + // block_header fields is not needed to setup + // It was set in alloc_from_new_superblock + assert( reinterpret_cast( pAddr )->desc() == pDesc ); + assert( !reinterpret_cast( pAddr )->isOSAllocated() ); + assert( !reinterpret_cast( pAddr )->isAligned() ); + + return reinterpret_cast( pAddr ); + } + + /// Allocates from a partial filled superblock if it possible + block_header * alloc_from_partial( processor_heap * pProcHeap ) + { + retry: + superblock_desc * pDesc = pProcHeap->get_partial(); + if ( !pDesc ) + return null_ptr(); + + // reserve blocks + anchor_tag oldAnchor; + anchor_tag newAnchor; + //byte * pAddr; + unsigned int nMoreCredits = 0; + + int nCollision = -1; + do { + ++nCollision; + + newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire); + if ( oldAnchor.state == SBSTATE_EMPTY ) { + free_superblock( pDesc ); + goto retry; + } + + nMoreCredits = ((unsigned int)(oldAnchor.count - 1)) < active_tag::c_nMaxCredits ? (unsigned int)(oldAnchor.count - 1) : active_tag::c_nMaxCredits; + newAnchor.count -= nMoreCredits + 1; + newAnchor.state = (nMoreCredits > 0) ? 
SBSTATE_ACTIVE : SBSTATE_FULL; + newAnchor.tag += 1; + } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed) ); + + if ( nCollision ) + pProcHeap->stat.incPartialDescCASFailureCount( nCollision ); + + if ( newAnchor.state == SBSTATE_FULL ) + pProcHeap->stat.incDescFull(); + + // Now, the thread is guaranteed to have reserved one or more blocks + // pop reserved block + byte * pAddr; + nCollision = -1; + do { + ++nCollision; + + newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire); + + assert( oldAnchor.avail < pDesc->nCapacity ); + pAddr = pDesc->pSB + oldAnchor.avail * pDesc->nBlockSize; + newAnchor.avail = reinterpret_cast( pAddr )->nNextFree; + ++newAnchor.tag; + } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed) ); + + if ( nCollision ) + pProcHeap->stat.incPartialAnchorCASFailureCount( nCollision ); + + assert( newAnchor.state != SBSTATE_EMPTY ); + + pProcHeap->stat.incAllocFromPartial(); + + if ( nMoreCredits > 0 ) + update_active( pProcHeap, pDesc, nMoreCredits ); + + // block_header fields is not needed to setup + // It was set in alloc_from_new_superblock + assert( reinterpret_cast( pAddr )->desc() == pDesc ); + assert( !reinterpret_cast( pAddr )->isAligned() ); + assert( !reinterpret_cast( pAddr )->isOSAllocated() ); + + return reinterpret_cast( pAddr ); + } + + /// Allocates from the new superblock + block_header * alloc_from_new_superblock( processor_heap * pProcHeap ) + { + superblock_desc * pDesc = new_superblock_desc( pProcHeap ); + assert( pDesc != null_ptr() ); + pDesc->pSB = new_superblock_buffer( pProcHeap ); + + anchor_tag anchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_relaxed); + anchor.tag += 1; + + // Make single-linked list of free blocks in superblock + byte * pEnd = pDesc->pSB + pDesc->nCapacity * pDesc->nBlockSize; + unsigned int nNext = 0; + const unsigned int nBlockSize = pDesc->nBlockSize; + for ( byte * p = pDesc->pSB; p < pEnd; p += nBlockSize ) { + reinterpret_cast( p )->set( pDesc, 0 ); + reinterpret_cast( p )->nNextFree = ++nNext; + } + reinterpret_cast( pEnd - nBlockSize )->nNextFree = 0; + + active_tag newActive; + newActive.set( pDesc, ( (pDesc->nCapacity - 1 < active_tag::c_nMaxCredits) ? 
pDesc->nCapacity - 1 : active_tag::c_nMaxCredits ) - 1 ); + + anchor.count = pDesc->nCapacity - 1 - (newActive.credits() + 1); + anchor.state = SBSTATE_ACTIVE; + pDesc->anchor.store(anchor, CDS_ATOMIC::memory_order_relaxed); + + active_tag curActive; + if ( pProcHeap->active.compare_exchange_strong( curActive, newActive, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { + pProcHeap->stat.incAllocFromNew(); + //reinterpret_cast( pDesc->pSB )->set( pDesc, 0 ); + return reinterpret_cast( pDesc->pSB ); + } + + free_superblock( pDesc ); + return null_ptr(); + } + + /// Find appropriate processor heap based on size-class selected + processor_heap * find_heap( typename sizeclass_selector::sizeclass_index nSizeClassIndex ) + { + assert( nSizeClassIndex < m_SizeClassSelector.size() ); + + unsigned int nProcessorId = m_Topology.current_processor(); + assert( nProcessorId < m_nProcessorCount ); + + if ( nProcessorId >= m_nProcessorCount ) + nProcessorId = 0; + + processor_desc * pDesc = m_arrProcDesc[ nProcessorId ].load( CDS_ATOMIC::memory_order_relaxed ); + while ( !pDesc ) { + + processor_desc * pNewDesc = new_processor_desc( nProcessorId ); + if ( m_arrProcDesc[nProcessorId].compare_exchange_strong( pDesc, pNewDesc, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) { + pDesc = pNewDesc; + break; + } + free_processor_desc( pNewDesc ); + } + + return &( pDesc->arrProcHeap[ nSizeClassIndex ] ); + } + + /// Updates active field of processor heap \p pProcHeap + void update_active( processor_heap * pProcHeap, superblock_desc * pDesc, unsigned int nCredits ) + { + assert( pProcHeap == pDesc->pProcHeap ); + + active_tag nullActive; + active_tag newActive; + newActive.set( pDesc, nCredits - 1 ); + + if ( pProcHeap->active.compare_exchange_strong( nullActive, newActive, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ) ) + return; + + // Someone installed another active superblock. + // Return credits to superblock and make it partial + + anchor_tag oldAnchor; + anchor_tag newAnchor; + + do { + newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire); + newAnchor.count += nCredits; + newAnchor.state = SBSTATE_PARTIAL; + } while ( !pDesc->anchor.compare_exchange_weak( oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + + pDesc->pProcHeap->add_partial( pDesc ); + } + + /// Allocates new processor descriptor + processor_desc * new_processor_desc( unsigned int nProcessorId ) + { + processor_desc * pDesc; + const size_t nPageHeapCount = m_SizeClassSelector.pageTypeCount(); + + /* + Processor descriptor layout + + proc_desc - 64-byte alignment + page_heap[0] 64-byte alignment + page_heap[1] 64-byte alignment + ... + page_heap[P] 64-byte alignment + + proc_heap[0] 64-byte alignment + proc_heap[1] 64-byte alignment + ... 
+ proc_heap[N] 64-byte alignment + */ + + const size_t szDesc = + ( sizeof(processor_desc) + + sizeof(pDesc->pageHeaps[0]) * nPageHeapCount + + c_nAlignment - 1 + ) / c_nAlignment +; + + const size_t szTotal = szDesc * c_nAlignment + sizeof(processor_heap) * m_SizeClassSelector.size(); + + static_assert( (sizeof(processor_heap) % c_nAlignment) == 0, "sizeof(processor_heap) error" ); + + pDesc = new( m_AlignedHeap.alloc( szTotal, c_nAlignment ) ) processor_desc; + + pDesc->pageHeaps = reinterpret_cast( pDesc + 1 ); + for ( size_t i = 0; i < nPageHeapCount; ++i ) + new (pDesc->pageHeaps + i) page_heap( m_SizeClassSelector.page_size(i)); + + // initialize processor heaps + pDesc->arrProcHeap = + reinterpret_cast( + reinterpret_cast(reinterpret_cast(pDesc + 1) + sizeof(pDesc->pageHeaps[0]) * nPageHeapCount + c_nAlignment - 1) + & ~(uptr_atomic_t(c_nAlignment) - 1) + ); + + processor_heap * pProcHeap = pDesc->arrProcHeap; + processor_heap * pProcHeapEnd = pDesc->arrProcHeap + m_SizeClassSelector.size(); + for ( unsigned int i = 0; pProcHeap != pProcHeapEnd; ++pProcHeap, ++i ) { + new (pProcHeap) processor_heap(); + pProcHeap->pProcDesc = pDesc; + pProcHeap->pSizeClass = m_SizeClassSelector.at(i); + if ( m_SizeClassSelector.find( pProcHeap->pSizeClass->nSBSize ) != sizeclass_selector::c_nNoSizeClass ) + pProcHeap->nPageIdx = processor_heap::c_nPageSelfAllocation; + else + pProcHeap->nPageIdx = pProcHeap->pSizeClass->nSBSizeIdx; + } + return pDesc; + } + + + void free_processor_heap( processor_heap * pProcHeap ) + { + if ( pProcHeap->nPageIdx == processor_heap::c_nPageSelfAllocation ) { + superblock_desc * pDesc; + + for ( pDesc = pProcHeap->partialList.pop(); pDesc; pDesc = pProcHeap->partialList.pop()) { + free( pDesc->pSB ); + m_AlignedHeap.free( pDesc ); + } + + superblock_desc * pPartial = pProcHeap->pPartial.load(CDS_ATOMIC::memory_order_relaxed); + if ( pPartial ) { + free( pPartial->pSB ); + m_AlignedHeap.free( pPartial ); + } + + pDesc = pProcHeap->active.load(CDS_ATOMIC::memory_order_relaxed).ptr(); + if ( pDesc ) { + free( pDesc->pSB ); + m_AlignedHeap.free( pDesc ); + } + } + else { + page_heap& pageHeap = pProcHeap->pProcDesc->pageHeaps[pProcHeap->nPageIdx]; + superblock_desc * pDesc; + + for ( pDesc = pProcHeap->partialList.pop(); pDesc; pDesc = pProcHeap->partialList.pop()) { + pageHeap.free( pDesc->pSB ); + m_AlignedHeap.free( pDesc ); + } + + superblock_desc * pPartial = pProcHeap->pPartial.load(CDS_ATOMIC::memory_order_relaxed); + if ( pPartial ) { + pageHeap.free( pPartial->pSB ); + m_AlignedHeap.free( pPartial ); + } + + pDesc = pProcHeap->active.load(CDS_ATOMIC::memory_order_relaxed).ptr(); + if ( pDesc ) { + pageHeap.free( pDesc->pSB ); + m_AlignedHeap.free( pDesc ); + } + } + pProcHeap->~processor_heap(); + } + + /// Frees processor descriptor + void free_processor_desc( processor_desc * pDesc ) + { + const size_t nPageHeapCount = m_SizeClassSelector.pageTypeCount(); + + for (unsigned int j = 0; j < m_SizeClassSelector.size(); ++j ) + free_processor_heap( pDesc->arrProcHeap + j ); + + for ( superblock_desc * pSBDesc = pDesc->listSBDescFree.pop(); pSBDesc; pSBDesc = pDesc->listSBDescFree.pop()) + m_AlignedHeap.free( pSBDesc ); + + for (size_t i = 0; i < nPageHeapCount; ++i ) + (pDesc->pageHeaps + i)->page_heap::~page_heap(); + + //m_IntHeap.free( pDesc->pageHeaps ); + pDesc->pageHeaps = null_ptr(); + + pDesc->processor_desc::~processor_desc(); + m_AlignedHeap.free( pDesc ); + } + + /// Allocates new superblock descriptor + superblock_desc * new_superblock_desc( 
processor_heap * pProcHeap ) + { + anchor_tag anchor; + superblock_desc * pDesc = pProcHeap->pProcDesc->listSBDescFree.pop(); + if ( pDesc == null_ptr() ) { + pDesc = new( m_AlignedHeap.alloc(sizeof(superblock_desc), c_nAlignment ) ) superblock_desc; + assert( (uptr_atomic_t(pDesc) & (c_nAlignment - 1)) == 0 ); + + anchor = pDesc->anchor.load( CDS_ATOMIC::memory_order_relaxed ); + anchor.tag = 0; + pDesc->anchor.store( anchor, CDS_ATOMIC::memory_order_relaxed ); + + pProcHeap->stat.incDescAllocCount(); + } + pDesc->nBlockSize = pProcHeap->pSizeClass->nBlockSize; + pDesc->nCapacity = pProcHeap->pSizeClass->nCapacity; + assert( pDesc->nCapacity <= c_nMaxBlockInSuperBlock ); + pDesc->pProcHeap = pProcHeap; + + anchor = pDesc->anchor.load( CDS_ATOMIC::memory_order_relaxed ); + anchor.avail = 1; + pDesc->anchor.store( anchor, CDS_ATOMIC::memory_order_relaxed ); + + return pDesc; + } + + /// Allocates superblock page + byte * new_superblock_buffer( processor_heap * pProcHeap ) + { + pProcHeap->stat.incBlockAllocated(); + if ( pProcHeap->nPageIdx == processor_heap::c_nPageSelfAllocation ) { + return (byte *) alloc( pProcHeap->pSizeClass->nSBSize ); + } + else { + return (byte *) pProcHeap->pProcDesc->pageHeaps[pProcHeap->nPageIdx].alloc(); + } + } + + /// Frees superblock descriptor and its page + void free_superblock( superblock_desc * pDesc ) + { + pDesc->pProcHeap->stat.incBlockDeallocated(); + processor_desc * pProcDesc = pDesc->pProcHeap->pProcDesc; + if ( pDesc->pSB ) { + if ( pDesc->pProcHeap->nPageIdx == processor_heap::c_nPageSelfAllocation ) { + free( pDesc->pSB ); + } + else { + pProcDesc->pageHeaps[pDesc->pProcHeap->nPageIdx].free( pDesc->pSB ); + } + } + pProcDesc->listSBDescFree.push( pDesc ); + } + + /// Allocate memory block + block_header * int_alloc( + size_t nSize ///< Size of memory block to allocate in bytes + ) + { + typename sizeclass_selector::sizeclass_index nSizeClassIndex = m_SizeClassSelector.find( nSize ); + if ( nSizeClassIndex == sizeclass_selector::c_nNoSizeClass ) { + return alloc_from_OS( nSize ); + } + assert( nSizeClassIndex < m_SizeClassSelector.size() ); + + block_header * pBlock; + processor_heap * pProcHeap; + while ( true ) { + pProcHeap = find_heap( nSizeClassIndex ); + if ( !pProcHeap ) + return alloc_from_OS( nSize ); + + if ( (pBlock = alloc_from_active( pProcHeap )) != null_ptr() ) + break; + if ( (pBlock = alloc_from_partial( pProcHeap )) != null_ptr() ) + break; + if ( (pBlock = alloc_from_new_superblock( pProcHeap )) != null_ptr() ) + break; + } + + pProcHeap->stat.incAllocatedBytes( pProcHeap->pSizeClass->nBlockSize ); + + assert( pBlock != null_ptr() ); + return pBlock; + } + + //@endcond + public: + /// Heap constructor + Heap() + { + // Explicit libcds initialization is needed since a static object may be constructed + cds::Initialize(); + + m_nProcessorCount = m_Topology.processor_count(); + m_arrProcDesc = new( m_AlignedHeap.alloc(sizeof(processor_desc *) * m_nProcessorCount, c_nAlignment )) + CDS_ATOMIC::atomic[ m_nProcessorCount ]; + memset( m_arrProcDesc, 0, sizeof(processor_desc *) * m_nProcessorCount ) ; // ?? memset for atomic<> + } + + /// Heap destructor + /** + The destructor frees all memory allocated by the heap. 
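All of the compare-exchange retry loops above and below (alloc_from_active(), alloc_from_partial(), update_active(), free()) share one shape: load the descriptor's packed anchor word, build a modified copy, and publish it with a CAS, bumping a version tag on every update so that a recycled value cannot be mistaken for an unchanged one (ABA). A minimal standalone sketch of that shape, written with plain std::atomic and with illustrative field widths rather than the allocator's actual anchor_tag layout:

#include <atomic>
#include <cstdint>

// A packed "anchor" word: free-list head, free count, state and an ABA tag.
// Field widths here are illustrative only.
struct anchor {
    std::uint64_t avail : 16;   // index of the first free block in the superblock
    std::uint64_t count : 16;   // number of free blocks
    std::uint64_t state : 2;    // ACTIVE / FULL / PARTIAL / EMPTY
    std::uint64_t tag   : 30;   // version tag, incremented on every successful update
};

std::atomic<anchor> g_anchor{ anchor{} };

// Reserve one block: load the anchor, modify a copy, publish with CAS.
// The tag increment makes a recycled anchor value distinguishable (ABA avoidance).
void reserve_one_block()
{
    anchor oldA = g_anchor.load( std::memory_order_acquire );
    anchor newA;
    do {
        if ( oldA.count == 0 )
            return;                 // nothing to reserve
        newA = oldA;
        newA.count -= 1;            // take one block
        newA.tag   += 1;            // bump the version tag
    } while ( !g_anchor.compare_exchange_weak( oldA, newA,
                std::memory_order_release, std::memory_order_relaxed ));
}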
+ */ + ~Heap() + { + for ( unsigned int i = 0; i < m_nProcessorCount; ++i ) { + processor_desc * pDesc = m_arrProcDesc[i].load(CDS_ATOMIC::memory_order_relaxed); + if ( pDesc ) + free_processor_desc( pDesc ); + } + + m_AlignedHeap.free( m_arrProcDesc ); + + // Explicit termination of libcds + cds::Terminate(); + } + + /// Allocate memory block + void * alloc( + size_t nSize ///< Size of memory block to allocate in bytes + ) + { + block_header * pBlock = int_alloc( nSize + sizeof(block_header) + bound_checker::trailer_size ); + + // Bound checking is only for our blocks + if ( !pBlock->isOSAllocated() ) { + // the block is allocated from our heap - bound checker is applicable + m_BoundChecker.make_trailer( + reinterpret_cast(pBlock + 1), + reinterpret_cast(pBlock) + pBlock->desc()->nBlockSize, + nSize + ); + } + + return pBlock + 1; + } + + /// Free previously allocated memory block + void free( + void * pMemory ///< Pointer to memory block to free + ) + { + if ( !pMemory ) + return; + + block_header * pRedirect = (reinterpret_cast( pMemory ) - 1); + block_header * pBlock = pRedirect->begin(); + + if ( pBlock->isOSAllocated() ) { + // Block has been allocated from OS + m_OSAllocStat.incBytesDeallocated( pBlock->getOSAllocSize() ); + m_LargeHeap.free( pBlock ); + return; + } + + assert( !pBlock->isAligned() ); + superblock_desc * pDesc = pBlock->desc(); + + m_BoundChecker.check_bounds( + pRedirect + 1, + reinterpret_cast( pBlock ) + pDesc->nBlockSize, + pDesc->nBlockSize + ); + + + anchor_tag oldAnchor; + anchor_tag newAnchor; + processor_heap_base * pProcHeap = pDesc->pProcHeap; + + pProcHeap->stat.incDeallocatedBytes( pDesc->nBlockSize ); + + oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire); + do { + newAnchor = oldAnchor; + reinterpret_cast( pBlock )->nNextFree = oldAnchor.avail; + newAnchor.avail = (reinterpret_cast( pBlock ) - pDesc->pSB) / pDesc->nBlockSize; + newAnchor.tag += 1; + + assert( oldAnchor.state != SBSTATE_EMPTY ); + + if ( oldAnchor.state == SBSTATE_FULL ) + newAnchor.state = SBSTATE_PARTIAL; + + if ( oldAnchor.count == pDesc->nCapacity - 1 ) { + //pProcHeap = pDesc->pProcHeap; + //CDS_COMPILER_RW_BARRIER ; // instruction fence is needed?.. + newAnchor.state = SBSTATE_EMPTY; + } + else + newAnchor.count += 1; + } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ); + + pProcHeap->stat.incFreeCount(); + + if ( newAnchor.state == SBSTATE_EMPTY ) { + if ( pProcHeap->unlink_partial( pDesc )) + free_superblock( pDesc ); + } + else if (oldAnchor.state == SBSTATE_FULL ) { + assert( pProcHeap != null_ptr() ); + pProcHeap->stat.decDescFull(); + pProcHeap->add_partial( pDesc ); + } + } + + /// Reallocate memory block + /** + If \p nNewSize is zero, then the block pointed to by \p pMemory is freed; + the return value is \p NULL, and \p pMemory is left pointing at a freed block. + + If there is not enough available memory to expand the block to the given size, + the original block is left unchanged, and \p NULL is returned. + + Aligned memory block cannot be realloc'ed: if \p pMemory has been allocated by \ref alloc_aligned, + then the return value is \p NULL and the original block is left unchanged. 
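For a concrete picture of the public interface implemented above and below (alloc, realloc, free, alloc_aligned, free_aligned), here is a minimal usage sketch; heap_type stands for some concrete instantiation of michael::Heap, whose option list is declared elsewhere in this header and is not repeated here.

// Minimal usage sketch of the Heap public interface.
// `heap_type` is assumed to be a concrete michael::Heap<...> instantiation.
template <class heap_type>
void heap_usage_sketch( heap_type& h )
{
    void * p = h.alloc( 100 );              // 100 bytes, served from a size class or the OS
    void * q = h.realloc( p, 200 );         // returns NULL and leaves p untouched on failure
    if ( q )
        p = q;
    h.free( p );

    void * a = h.alloc_aligned( 256, 64 );  // 256 bytes with 64-byte alignment
    h.free_aligned( a );                    // blocks from alloc_aligned are released here
}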
+ */ + void * realloc( + void * pMemory, ///< Pointer to previously allocated memory block + size_t nNewSize ///< New size of memory block, in bytes + ) + { + if ( nNewSize == 0 ) { + free( pMemory ); + return null_ptr(); + } + + const size_t nOrigSize = nNewSize; + nNewSize += sizeof(block_header) + bound_checker::trailer_size; + + block_header * pBlock = reinterpret_cast( pMemory ) - 1; + + // Reallocation of aligned block is not possible + if ( pBlock->isAligned() ) { + assert( false ); + return null_ptr(); + } + + if ( pBlock->isOSAllocated() ) { + // The block has been allocated from OS + size_t nCurSize = pBlock->getOSAllocSize(); + + if ( nCurSize >= nNewSize ) + return pMemory; + + // Grow block size + void * pNewBuf = alloc( nOrigSize ); + if ( pNewBuf ) { + memcpy( pNewBuf, pMemory, nCurSize - sizeof(block_header) ); + free( pMemory ); + } + return pNewBuf; + } + + superblock_desc * pDesc = pBlock->desc(); + if ( pDesc->nBlockSize <= nNewSize ) { + // In-place reallocation + m_BoundChecker.make_trailer( + reinterpret_cast(pBlock + 1), + reinterpret_cast(pBlock) + pBlock->desc()->nBlockSize, + nOrigSize + ); + + return pMemory; + } + + void * pNew = alloc( nNewSize ); + if ( pNew ) { + memcpy( pNew, pMemory, pDesc->nBlockSize - sizeof(block_header) ); + free( pMemory ); + return pNew; + } + + return null_ptr(); + } + + /// Allocate aligned memory block + void * alloc_aligned( + size_t nSize, ///< Size of memory block to allocate in bytes + size_t nAlignment ///< Alignment + ) + { + if ( nAlignment <= c_nDefaultBlockAlignment ) { + void * p = alloc( nSize ); + assert( (reinterpret_cast(p) & (nAlignment - 1)) == 0 ); + return p; + } + + block_header * pBlock = int_alloc( nSize + nAlignment + sizeof(block_header) + bound_checker::trailer_size ); + + block_header * pRedirect; + if ( (reinterpret_cast( pBlock + 1) & (nAlignment - 1)) != 0 ) { + pRedirect = reinterpret_cast( (reinterpret_cast( pBlock ) & ~(nAlignment - 1)) + nAlignment ) - 1; + assert( pRedirect != pBlock ); + pRedirect->set( reinterpret_cast(pBlock), 1 ); + + assert( (reinterpret_cast(pRedirect + 1) & (nAlignment - 1)) == 0 ); + } + else + pRedirect = pBlock; + + + // Bound checking is only for our blocks + if ( !pBlock->isOSAllocated() ) { + // the block is allocated from our heap - bound checker is applicable + m_BoundChecker.make_trailer( + reinterpret_cast(pRedirect + 1), + reinterpret_cast(pBlock) + pBlock->desc()->nBlockSize, + nSize + ); + } + + return pRedirect + 1; + } + + /// Free aligned memory block previously allocated by \ref alloc_aligned + void free_aligned( + void * pMemory ///< Pointer to memory block to free + ) + { + free( pMemory ); + } + + public: + + /// Get instant summary statistics + void summaryStat( summary_stat& st ) + { + size_t nProcHeapCount = m_SizeClassSelector.size(); + for ( unsigned int nProcessor = 0; nProcessor < m_nProcessorCount; ++nProcessor ) { + processor_desc * pProcDesc = m_arrProcDesc[nProcessor].load(CDS_ATOMIC::memory_order_relaxed); + if ( pProcDesc ) { + for ( unsigned int i = 0; i < nProcHeapCount; ++i ) { + processor_heap_base * pProcHeap = pProcDesc->arrProcHeap + i; + if ( pProcHeap ) { + st.add_procheap_stat( pProcHeap->stat ); + } + } + } + } + + st.add_heap_stat( m_OSAllocStat ); + } + }; + +}}} // namespace cds::memory::michael + +#endif // __CDS_MEMORY_MICHAEL_ALLOCATOR_TMPL_H diff --git a/cds/memory/michael/bound_check.h b/cds/memory/michael/bound_check.h new file mode 100644 index 00000000..46f9f9da --- /dev/null +++ b/cds/memory/michael/bound_check.h @@ 
-0,0 +1,153 @@ +//$$CDS-header$$ + +#ifndef __CDS_MEMORY_MICHAEL_ALLOCATOR_BOUND_CHECK_H +#define __CDS_MEMORY_MICHAEL_ALLOCATOR_BOUND_CHECK_H + +#include +#include +#include + +namespace cds { namespace memory { namespace michael { + + //@cond + namespace details { + class bound_checker + { + protected: + typedef atomic64u_t trailer_type; + static const trailer_type s_BoundCheckerTrailer = 0xbadcafeedeadc0feULL; + + public: + enum { + trailer_size = sizeof(trailer_type) + sizeof(size_t) + }; + + void make_trailer( void * pStartArea, void * pEndBlock, size_t nAllocSize ) + { + char * pArea = reinterpret_cast(pStartArea); + assert( reinterpret_cast(pEndBlock) - (pArea + nAllocSize) >= trailer_size ); + + trailer_type trailer = s_BoundCheckerTrailer; + memcpy( pArea + nAllocSize, &trailer, sizeof(trailer) ); + + // the next assignment is correct because pBlock is at least sizeof(size_t)-byte aligned + assert( (reinterpret_cast(pEndBlock) & (sizeof(size_t) - 1)) == 0 ); + *(reinterpret_cast( pEndBlock ) - 1) = nAllocSize; + } + + bool check_bounds( void * pStartArea, void * pEndBlock, size_t nBlockSize ) + { + trailer_type trailer = s_BoundCheckerTrailer; + size_t nAllocSize = *(reinterpret_cast( pEndBlock ) - 1); + + assert( nAllocSize < nBlockSize ); + return nAllocSize < nBlockSize + && memcmp( reinterpret_cast(pStartArea) + nAllocSize, &trailer, sizeof(trailer) ) == 0; + } + }; + } + //@endcond + +#if defined(CDS_DOXYGEN_INVOKED) || defined(_DEBUG) + /// Debug bound checker + /** + This is one of value of opt::check_bounds option for Michael's \ref Heap memory allocator. + It is intended for debug mode only. It throws an assertion when memory bound violation is detected. + In release mode it is equal to opt::check_bounds . + */ + class debug_bound_checking: public details::bound_checker + { + //@cond + typedef details::bound_checker base_class; + public: + void check_bounds( void * pStartArea, void * pEndBlock, size_t nBlockSize ) + { + // Bound checking assertion + assert( base_class::check_bounds( pStartArea, pEndBlock, nBlockSize ) ); + } + + //@endcond + }; +#else + typedef cds::opt::none debug_bound_checking; +#endif + + /// %Exception of \ref strong_bound_checking bound checker + class bound_checker_exception: public std::exception + { + //@cond + public: + virtual const char * what() const throw() + { + return "Memory bound checking violation"; + } + //@endcond + }; + + /// %Exception throwing bound checker + /** + This is one of value of opt::check_bounds option for Michael's \ref Heap memory allocator. + It is intended for debug and release mode. 
+ When memory bound violation is detected + \li In debug mode - an assertion is raised + \li In release mode - an exception of type \ref bound_checker_exception is thrown + */ + class strong_bound_checking: public details::bound_checker + { + //@cond + typedef details::bound_checker base_class; + public: + void check_bounds( void * pStartArea, void * pEndBlock, size_t nBlockSize ) + { + if ( !base_class::check_bounds( pStartArea, pEndBlock, nBlockSize ) ) { + throw bound_checker_exception(); + } + } + + //@endcond + }; + + + //@cond + namespace details { + template + class bound_checker_selector: public BOUND_CHECKER + { + typedef BOUND_CHECKER base_class; + public: + enum { + trailer_size = base_class::trailer_size + }; + + void make_trailer( void * pStartArea, void * pEndBlock, size_t nAllocSize ) + { + base_class::make_trailer( pStartArea, pEndBlock, nAllocSize ); + } + + void check_bounds( void * pStartArea, void * pEndBlock, size_t nBlockSize ) + { + base_class::check_bounds( pStartArea, pEndBlock, nBlockSize ); + } + }; + + template <> + class bound_checker_selector + { + public: + enum { + trailer_size = 0 + }; + + void make_trailer( void * /*pStartArea*/, void * /*pEndBlock*/, size_t /*nAllocSize*/ ) + {} + + void check_bounds( void * /*pStartArea*/, void * /*pEndBlock*/, size_t /*nBlockSize*/ ) + {} + }; + } // namespace details + //@endcond + + +}}} // namespace cds::memory::michael + +#endif // #ifndef __CDS_MEMORY_MICHAEL_ALLOCATOR_BOUND_CHECK_H diff --git a/cds/memory/michael/options.h b/cds/memory/michael/options.h new file mode 100644 index 00000000..95a9d5a8 --- /dev/null +++ b/cds/memory/michael/options.h @@ -0,0 +1,254 @@ +//$$CDS-header$$ + +#ifndef __CDS_MEMORY_MICHAEL_OPTIONS_H +#define __CDS_MEMORY_MICHAEL_OPTIONS_H + +/* + Options for Michael allocator + Source: + [2004] Maged Michael "Scalable Lock-Free Dynamic Memory Allocation" + + Editions: + 2011.01.23 khizmax Created +*/ + +#include + +namespace cds { namespace memory { namespace michael { + + /// Options related for Michael's allocator \ref Heap + namespace opt { + using namespace cds::opt; + + /// Option setter specifies system topology + /** + See cds::OS::Win32::topology for interface example. + + Default type: \p cds::OS::topology selects appropriate implementation for target system. + */ + template + struct sys_topology { + //@cond + template struct pack: public BASE + { + typedef TOPOLOGY sys_topology; + }; + //@endcond + }; + + /// Option setter specifies system heap for large blocks + /** + If the block size requested is more that Michael's allocator upper limit + then an allocator provided by \p system_heap option is called. + By default, Michael's allocator can maintain blocks up to 64K bytes length; + for blocks larger than 64K the allocator defined by this option is used. + + Available \p HEAP implementations: + - malloc_heap + */ + template + struct system_heap + { + //@cond + template struct pack: public BASE + { + typedef HEAP system_heap; + }; + //@endcond + }; + + /// Option setter specifies internal aligned heap + /** + This heap is used by Michael's allocator for obtaining aligned memory. + + Available \p HEAP implementations: + - aligned_malloc_heap + */ + template + struct aligned_heap { + //@cond + template struct pack: public BASE + { + typedef HEAP aligned_heap; + }; + //@endcond + }; + + /// Option setter specifies page heap + /** + This heap is used by Michael's allocator for superblock allocation. 
+ The size of superblock is: + - 64K - for small blocks + - 1M - for other blocks + + Available \p HEAP implementations: + - page_allocator + - page_cached_allocator + */ + template + struct page_heap { + //@cond + template struct pack: public BASE + { + typedef HEAP page_heap; + }; + //@endcond + }; + + /// Option setter specifies size-class selector + /** + The size-class selector determines the best size-class for requested block size, + i.e. it specifies allocation granularity. + In fact, it selects superblock descriptor within processor heap. + + Available \p Type implementation: + - default_sizeclass_selector + */ + template + struct sizeclass_selector { + //@cond + template struct pack: public BASE + { + typedef Type sizeclass_selector; + }; + //@endcond + }; + + /// Option setter specifies free-list of superblock descriptor + /** + Available \p Type implementations: + - free_list_locked + */ + template + struct free_list { + //@cond + template struct pack: public BASE + { + typedef Type free_list; + }; + //@endcond + }; + + /// Option setter specifies partial list of superblocks + /** + Available \p Type implementations: + - partial_list_locked + */ + template + struct partial_list { + //@cond + template struct pack: public BASE + { + typedef Type partial_list; + }; + //@endcond + }; + + /// Option setter for processor heap statistics + /** + The option specifies a type for gathering internal processor heap statistics. + The processor heap statistics is gathered on per processor basis. + Large memory block (more than 64K) allocated directly from OS does not fall into these statistics. + For OS-allocated memory block see \ref os_allocated_stat option. + + Available \p Type implementations: + - \ref procheap_atomic_stat + - \ref procheap_empty_stat + + For interface of type \p Type see \ref procheap_atomic_stat. + */ + template + struct procheap_stat { + //@cond + template struct pack: public BASE + { + typedef Type procheap_stat; + }; + //@endcond + }; + + /// Option setter for OS-allocated memory + /** + The option specifies a type for gathering internal statistics of + large (OS-allocated) memory blocks that is too big to maintain by Michael's heap + (with default \ref sizeclass_selector, the block that large than 64K is not + maintained by Michael's heap and passed directly to system allocator). + + Note that OS-allocated memory statistics does not include memory allocation + for heap's internal purposes. Only direct call of \p alloc or \p alloc_aligned + for large memory block is counted. + + Available \p Type implementations: + - \ref os_allocated_atomic + - \ref os_allocated_empty + */ + template + struct os_allocated_stat { + //@cond + template struct pack: public BASE + { + typedef Type os_allocated_stat; + }; + //@endcond + }; + + /// Option setter for bounds checking + /** + This option defines a strategy to check upper memory boundary of allocated blocks. + \p Type defines a class for bound checking with following interface: + + \code + class bound_checker + { + public: + enum { + trailer_size = numeric_const + }; + + void make_trailer( void * pStartArea, void * pEndBlock, size_t nAllocSize ); + bool check_bounds( void * pStartArea, void * pEndBlock, size_t nBlockSize ); + } + \endcode + + Before allocating a memory block of size N, the heap adds the \p trailer_size to N and really it + allocates N + trailer_size bytes. 
Then, the heap calls the \p make_trailer function of the bound checker with the following arguments: + - \p pStartArea - start of the allocated block + - \p pEndBlock - the first byte after the really allocated block; \code pEndBlock - pStartArea >= N + trailer_size \endcode + - \p nAllocSize - requested size in bytes (i.e. N) + So, the \p make_trailer function can place a predefined value, called a bound mark, of any type (for example, int64) + at address pStartArea + nAllocSize, and store the real allocated block size N at pEndBlock - sizeof(size_t). + In this example, the \p trailer_size constant is equal to sizeof(int64) + sizeof(size_t). + + Before a previously allocated memory block is deallocated, the \p check_bounds function is called. + The function has a similar signature: + - \p pStartArea - start of the allocated block (like the first \p make_trailer argument) + - \p pEndBlock - the first byte after the allocated block (like the second \p make_trailer argument) + - \p nBlockSize - the real allocated block size, which is not equal to the \p nAllocSize argument of \p make_trailer + + The function can: + - calculate the real allocated block size: \code N = *reinterpret_cast(pEndBlock - sizeof(size_t)) \endcode + - check whether the bound mark is unchanged: \code *reinterpret_cast(pStartArea + N) == bound_mark \endcode + - raise an assertion if the mark has been changed + + The library provides the following predefined bound checkers, i.e. the possible values of the \p Type + template argument: + \li cds::opt::none - no bound checking is performed (default) + \li michael::debug_bound_checking - an assertion is raised when a memory bound violation is detected. + This option is applicable only in debug mode; in release mode it is equal to cds::opt::none. + \li michael::strong_bound_checking - an assertion is raised in debug mode if a memory bound violation is detected; + an exception is thrown in release mode.
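For concreteness, a user-defined checker satisfying the interface just described could look like the following sketch; the class name and the magic constant are invented for the example, and the library's own details::bound_checker in bound_check.h follows the same scheme.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Illustrative bound checker implementing the documented interface.
class example_bound_checker
{
    static const std::uint64_t c_nMark = 0xBADC0FFEE0DDF00DULL;   // invented bound mark
public:
    enum { trailer_size = sizeof(std::uint64_t) + sizeof(size_t) };

    void make_trailer( void * pStartArea, void * pEndBlock, size_t nAllocSize )
    {
        // Put the bound mark right after the user data...
        std::uint64_t mark = c_nMark;
        std::memcpy( static_cast<char *>(pStartArea) + nAllocSize, &mark, sizeof(mark) );
        // ...and remember the requested size in the last size_t of the block.
        *(reinterpret_cast<size_t *>(pEndBlock) - 1) = nAllocSize;
    }

    bool check_bounds( void * pStartArea, void * pEndBlock, size_t nBlockSize )
    {
        std::uint64_t mark = c_nMark;
        size_t nAllocSize = *(reinterpret_cast<size_t *>(pEndBlock) - 1);
        assert( nAllocSize < nBlockSize );
        // The mark is intact only if nothing wrote past the requested size.
        return nAllocSize < nBlockSize
            && std::memcmp( static_cast<char *>(pStartArea) + nAllocSize, &mark, sizeof(mark) ) == 0;
    }
};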
+ */ + template + struct check_bounds { + //@cond + template struct pack: public BASE + { + typedef Type check_bounds; + }; + //@endcond + }; + } + +}}} // namespace cds::memory::michael + +#endif // #ifndef __CDS_MEMORY_MICHAEL_OPTIONS_H diff --git a/cds/memory/michael/osalloc_stat.h b/cds/memory/michael/osalloc_stat.h new file mode 100644 index 00000000..4d04dfb0 --- /dev/null +++ b/cds/memory/michael/osalloc_stat.h @@ -0,0 +1,111 @@ +//$$CDS-header$$ + +#ifndef __CDS_MEMORY_MICHAEL_ALLOCATOR_OSALLOC_STAT_H +#define __CDS_MEMORY_MICHAEL_ALLOCATOR_OSALLOC_STAT_H + +#include + +namespace cds { namespace memory { namespace michael { + + /// Statistics for large (allocated directly from %OS) block + struct os_allocated_atomic + { + ///@cond + CDS_ATOMIC::atomic nAllocCount ; ///< Event count of large block allocation from %OS + CDS_ATOMIC::atomic nFreeCount ; ///< Event count of large block deallocation to %OS + CDS_ATOMIC::atomic nBytesAllocated ; ///< Total size of allocated large blocks, in bytes + CDS_ATOMIC::atomic nBytesDeallocated ; ///< Total size of deallocated large blocks, in bytes + + os_allocated_atomic() + : nAllocCount(0) + , nFreeCount(0) + , nBytesAllocated(0) + , nBytesDeallocated(0) + {} + ///@endcond + + /// Adds \p nSize to nBytesAllocated counter + void incBytesAllocated( size_t nSize ) + { + nAllocCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed); + nBytesAllocated.fetch_add( nSize, CDS_ATOMIC::memory_order_relaxed ); + } + + /// Adds \p nSize to nBytesDeallocated counter + void incBytesDeallocated( size_t nSize ) + { + nFreeCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + nBytesDeallocated.fetch_add( nSize, CDS_ATOMIC::memory_order_relaxed ); + } + + /// Returns count of \p alloc and \p alloc_aligned function call (for large block allocated directly from %OS) + size_t allocCount() const + { + return nAllocCount.load(CDS_ATOMIC::memory_order_relaxed); + } + + /// Returns count of \p free and \p free_aligned function call (for large block allocated directly from %OS) + size_t freeCount() const + { + return nFreeCount.load(CDS_ATOMIC::memory_order_relaxed); + } + + /// Returns current value of nBytesAllocated counter + atomic64u_t allocatedBytes() const + { + return nBytesAllocated.load(CDS_ATOMIC::memory_order_relaxed); + } + + /// Returns current value of nBytesAllocated counter + atomic64u_t deallocatedBytes() const + { + return nBytesDeallocated.load(CDS_ATOMIC::memory_order_relaxed); + } + }; + + /// Dummy statistics for large (allocated directly from %OS) block + /** + This class does not gather any statistics. + Class interface is the same as \ref os_allocated_atomic. 
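Putting the statistics and bound-checking options together, a debug-oriented heap could plausibly be declared as below. This assumes michael::Heap accepts these option setters as template arguments (its exact parameter list is declared in allocator.h and is not shown here); the alias names are illustrative.

// Assumed: <cds/memory/michael/allocator.h> (which defines michael::Heap) is included.
namespace mm = cds::memory::michael;

typedef mm::Heap<
    mm::opt::procheap_stat< mm::procheap_atomic_stat >,        // per-processor-heap counters
    mm::opt::os_allocated_stat< mm::os_allocated_atomic >,     // counters for large OS blocks
    mm::opt::check_bounds< mm::debug_bound_checking >          // trailer check on every free()
> debug_heap_type;   // illustrative name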
+ */ + struct os_allocated_empty + { + //@cond + /// Adds \p nSize to nBytesAllocated counter + void incBytesAllocated( size_t nSize ) + { CDS_UNUSED(nSize); } + + /// Adds \p nSize to nBytesDeallocated counter + void incBytesDeallocated( size_t nSize ) + { CDS_UNUSED(nSize); } + + /// Returns count of \p alloc and \p alloc_aligned function call (for large block allocated directly from OS) + size_t allocCount() const + { + return 0; + } + + /// Returns count of \p free and \p free_aligned function call (for large block allocated directly from OS) + size_t freeCount() const + { + return 0; + } + + /// Returns current value of nBytesAllocated counter + atomic64u_t allocatedBytes() const + { + return 0; + } + + /// Returns current value of nBytesAllocated counter + atomic64u_t deallocatedBytes() const + { + return 0; + } + //@endcond + }; + + +}}} // namespace cds::memory::michael + +#endif /// __CDS_MEMORY_MICHAEL_ALLOCATOR_OSALLOC_STAT_H diff --git a/cds/memory/michael/procheap_stat.h b/cds/memory/michael/procheap_stat.h new file mode 100644 index 00000000..fa2466c5 --- /dev/null +++ b/cds/memory/michael/procheap_stat.h @@ -0,0 +1,392 @@ +//$$CDS-header$$ + +#ifndef __CDS_MEMORY_MICHAEL_ALLOCATOR_PROCHEAP_STAT_H +#define __CDS_MEMORY_MICHAEL_ALLOCATOR_PROCHEAP_STAT_H + +#include + +namespace cds { namespace memory { namespace michael { + + /// processor heap statistics + /** + This class is implementation of \ref opt::procheap_stat option. + The statistic counter implementation is based on atomic operations. + + Template parameters: + - \p INC_FENCE - memory fence for increment operation (default is release semantics) + - \p READ_FENCE - memory fence for reading of statistic values (default is acquire semantics) + */ + class procheap_atomic_stat + { + //@cond + CDS_ATOMIC::atomic nAllocFromActive ; ///< Event count of allocation from active superblock + CDS_ATOMIC::atomic nAllocFromPartial ; ///< Event count of allocation from partial superblock + CDS_ATOMIC::atomic nAllocFromNew ; ///< Event count of allocation from new superblock + CDS_ATOMIC::atomic nFreeCount ; ///< \ref free function call count + CDS_ATOMIC::atomic nBlockCount ; ///< Count of superblock allocated + CDS_ATOMIC::atomic nBlockDeallocCount ; ///< Count of superblock deallocated + CDS_ATOMIC::atomic nDescAllocCount ; ///< Count of superblock descriptors + CDS_ATOMIC::atomic nDescFull ; ///< Count of full superblock + CDS_ATOMIC::atomic nBytesAllocated ; ///< Count of allocated bytes + CDS_ATOMIC::atomic nBytesDeallocated ; ///< Count of deallocated bytes + + CDS_ATOMIC::atomic nActiveDescCASFailureCount ; ///< CAS failure counter for active block of \p alloc_from_active Heap function + CDS_ATOMIC::atomic nActiveAnchorCASFailureCount; ///< CAS failure counter for active block of \p alloc_from_active Heap function + CDS_ATOMIC::atomic nPartialDescCASFailureCount ; ///< CAS failure counter for partial block of \p alloc_from_partial Heap function + CDS_ATOMIC::atomic nPartialAnchorCASFailureCount; ///< CAS failure counter for partial block of \p alloc_from_partial Heap function + + //@endcond + + public: + //@cond + procheap_atomic_stat() + : nAllocFromActive(0) + , nAllocFromPartial(0) + , nAllocFromNew(0) + , nFreeCount(0) + , nBlockCount(0) + , nDescFull(0) + , nBytesAllocated(0) + , nBytesDeallocated(0) + , nActiveDescCASFailureCount(0) + , nActiveAnchorCASFailureCount(0) + , nPartialDescCASFailureCount(0) + , nPartialAnchorCASFailureCount(0) + {} + //@endcond + + public: + /// Increment event counter of allocation from 
active superblock + void incAllocFromActive() + { + nAllocFromActive.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + } + /// Increment event counter of allocation from active superblock by \p n + void incAllocFromActive( size_t n ) + { + nAllocFromActive.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ); + } + + /// Increment event counter of allocation from partial superblock + void incAllocFromPartial() + { + nAllocFromPartial.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + } + /// Increment event counter of allocation from partial superblock by \p n + void incAllocFromPartial( size_t n ) + { + nAllocFromPartial.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ); + } + + /// Increment event count of allocation from new superblock + void incAllocFromNew() + { + nAllocFromNew.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + } + /// Increment event count of allocation from new superblock by \p n + void incAllocFromNew( size_t n ) + { + nAllocFromNew.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ); + } + + /// Increment event counter of free calling + void incFreeCount() + { + nFreeCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + } + /// Increment event counter of free calling by \p n + void incFreeCount( size_t n ) + { + nFreeCount.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ); + } + + /// Increment counter of superblock allocated + void incBlockAllocated() + { + nBlockCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + } + /// Increment counter of superblock allocated by \p n + void incBlockAllocated( size_t n ) + { + nBlockCount.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ); + } + + /// Increment counter of superblock deallocated + void incBlockDeallocated() + { + nBlockDeallocCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + } + /// Increment counter of superblock deallocated by \p n + void incBlockDeallocated( size_t n ) + { + nBlockDeallocCount.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ); + } + + /// Increment counter of superblock descriptor allocated + void incDescAllocCount() + { + nDescAllocCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + } + /// Increment counter of superblock descriptor allocated by \p n + void incDescAllocCount( size_t n ) + { + nDescAllocCount.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ); + } + + /// Increment counter of full superblock descriptor + void incDescFull() + { + nDescFull.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + } + /// Increment counter of full superblock descriptor by \p n + void incDescFull( size_t n ) + { + nDescFull.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ); + } + + /// Decrement counter of full superblock descriptor + void decDescFull() + { + nDescFull.fetch_sub( 1, CDS_ATOMIC::memory_order_relaxed ); + } + /// Decrement counter of full superblock descriptor by \p n + void decDescFull(size_t n) + { + nDescFull.fetch_sub( n, CDS_ATOMIC::memory_order_relaxed ); + } + /// Add \p nBytes to allocated bytes counter + void incAllocatedBytes( size_t nBytes ) + { + nBytesAllocated.fetch_add( nBytes, CDS_ATOMIC::memory_order_relaxed ); + } + /// Add \p nBytes to deallocated bytes counter + void incDeallocatedBytes( size_t nBytes ) + { + nBytesDeallocated.fetch_add( nBytes, CDS_ATOMIC::memory_order_relaxed); + } + + /// Add \p nCount to CAS failure counter of updating \p active field of active descriptor for \p alloc_from_active internal Heap function + void incActiveDescCASFailureCount( int nCount ) + { + nActiveDescCASFailureCount.fetch_add( nCount, 
CDS_ATOMIC::memory_order_relaxed ); + } + + /// Add \p nCount to CAS failure counter of updating \p anchor field of active descriptor for \p alloc_from_active internal Heap function + void incActiveAnchorCASFailureCount( int nCount ) + { + nActiveAnchorCASFailureCount.fetch_add( nCount, CDS_ATOMIC::memory_order_relaxed ); + } + + /// Add \p nCount to CAS failure counter of updating \p active field of partial descriptor for \p alloc_from_partial internal Heap function + void incPartialDescCASFailureCount( int nCount ) + { + nPartialDescCASFailureCount.fetch_add( nCount, CDS_ATOMIC::memory_order_relaxed ); + } + + /// Add \p nCount to CAS failure counter of updating \p anchor field of partial descriptor for \p alloc_from_partial internal Heap function + void incPartialAnchorCASFailureCount( int nCount ) + { + nPartialAnchorCASFailureCount.fetch_add( nCount, CDS_ATOMIC::memory_order_relaxed ); + } + + // ----------------------------------------------------------------- + // Reading + + /// Read event counter of allocation from active superblock + size_t allocFromActive() const + { + return nAllocFromActive.load(CDS_ATOMIC::memory_order_relaxed); + } + + /// Read event counter of allocation from partial superblock + size_t allocFromPartial() const + { + return nAllocFromPartial.load(CDS_ATOMIC::memory_order_relaxed); + } + + /// Read event count of allocation from new superblock + size_t allocFromNew() const + { + return nAllocFromNew.load(CDS_ATOMIC::memory_order_relaxed); + } + + /// Read event counter of free calling + size_t freeCount() const + { + return nFreeCount.load(CDS_ATOMIC::memory_order_relaxed); + } + + /// Read counter of superblock allocated + size_t blockAllocated() const + { + return nBlockCount.load(CDS_ATOMIC::memory_order_relaxed); + } + + /// Read counter of superblock deallocated + size_t blockDeallocated() const + { + return nBlockDeallocCount.load(CDS_ATOMIC::memory_order_relaxed); + } + + /// Read counter of superblock descriptor allocated + size_t descAllocCount() const + { + return nDescAllocCount.load(CDS_ATOMIC::memory_order_relaxed); + } + + /// Read counter of full superblock descriptor + size_t descFull() const + { + return nDescFull.load(CDS_ATOMIC::memory_order_relaxed); + } + + /// Get counter of allocated bytes + /** + This counter only counts the bytes allocated by Heap, OS allocation (large blocks) is not counted. 
+ + To get the count of bytes allocated but not yet deallocated, call + \code allocatedBytes() - deallocatedBytes() \endcode + */ + atomic64u_t allocatedBytes() const + { + return nBytesAllocated.load(CDS_ATOMIC::memory_order_relaxed); + } + + /// Get counter of deallocated bytes + /** + This counter only counts the bytes allocated by Heap; OS allocation (large blocks) is not counted. + + See the \ref allocatedBytes notes + */ + atomic64u_t deallocatedBytes() const + { + return nBytesDeallocated.load(CDS_ATOMIC::memory_order_relaxed); + } + + /// Get CAS failure counter of updating \p active field of active descriptor for \p alloc_from_active internal Heap function + size_t activeDescCASFailureCount() const + { + return nActiveDescCASFailureCount.load(CDS_ATOMIC::memory_order_relaxed); + } + + /// Get CAS failure counter of updating \p anchor field of active descriptor for \p alloc_from_active internal Heap function + size_t activeAnchorCASFailureCount() const + { + return nActiveAnchorCASFailureCount.load(CDS_ATOMIC::memory_order_relaxed); + } + + /// Get CAS failure counter of updating \p active field of partial descriptor for \p alloc_from_partial internal Heap function + size_t partialDescCASFailureCount() const + { + return nPartialDescCASFailureCount.load(CDS_ATOMIC::memory_order_relaxed); + } + + /// Get CAS failure counter of updating \p anchor field of partial descriptor for \p alloc_from_partial internal Heap function + size_t partialAnchorCASFailureCount() const + { + return nPartialAnchorCASFailureCount.load(CDS_ATOMIC::memory_order_relaxed); + } + }; + + /// Empty processor heap statistics + /** + This class is a dummy implementation of the \ref opt::procheap_stat option. + No statistics gathering is performed. + + For the interface, see procheap_atomic_stat. + All getter methods return 0.
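A small helper illustrating the note above: the difference of the two byte counters gives the bytes currently held through the Heap (large OS-allocated blocks are tracked by os_allocated_stat instead).

#include <cds/memory/michael/procheap_stat.h>

// Bytes currently in use through a processor heap, per the allocatedBytes() note above.
unsigned long long bytes_in_use( cds::memory::michael::procheap_atomic_stat const& stat )
{
    return stat.allocatedBytes() - stat.deallocatedBytes();
}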
+ */ + class procheap_empty_stat + { + //@cond + public: + void incAllocFromActive() + {} + void incAllocFromPartial() + {} + void incAllocFromNew() + {} + void incFreeCount() + {} + void incBlockAllocated() + {} + void incBlockDeallocated() + {} + void incDescAllocCount() + {} + void incDescFull() + {} + void decDescFull() + {} + + // Add ------------------------------------------------------------- + void incAllocFromActive(size_t) + {} + void incAllocFromPartial(size_t) + {} + void incAllocFromNew(size_t) + {} + void incFreeCount(size_t) + {} + void incBlockAllocated(size_t) + {} + void incBlockDeallocated(size_t) + {} + void incDescAllocCount(size_t) + {} + void incDescFull(size_t) + {} + void decDescFull(size_t) + {} + void incAllocatedBytes( size_t /*nBytes*/ ) + {} + void incDeallocatedBytes( size_t /*nBytes*/ ) + {} + void incActiveDescCASFailureCount( int /*nCount*/ ) + {} + void incActiveAnchorCASFailureCount( int /*nCount*/ ) + {} + void incPartialDescCASFailureCount( int /*nCount*/ ) + {} + void incPartialAnchorCASFailureCount( int /*nCount*/ ) + {} + + // ----------------------------------------------------------------- + // Reading + + size_t allocFromActive() const + { return 0; } + size_t allocFromPartial() const + { return 0; } + size_t allocFromNew() const + { return 0; } + size_t freeCount() const + { return 0; } + size_t blockAllocated() const + { return 0; } + size_t blockDeallocated() const + { return 0; } + size_t descAllocCount() const + { return 0; } + size_t descFull() const + { return 0; } + atomic64u_t allocatedBytes() const + { return 0; } + atomic64u_t deallocatedBytes() const + { return 0; } + size_t activeDescCASFailureCount() const + { return 0; } + size_t activeAnchorCASFailureCount() const + { return 0; } + size_t partialDescCASFailureCount() const + { return 0; } + size_t partialAnchorCASFailureCount() const + { return 0; } + + //@endcond + }; + + +}}} // namespace cds::memory::michael + +#endif /// __CDS_MEMORY_MICHAEL_ALLOCATOR_PROCHEAP_STAT_H diff --git a/cds/memory/pool_allocator.h b/cds/memory/pool_allocator.h new file mode 100644 index 00000000..e821749c --- /dev/null +++ b/cds/memory/pool_allocator.h @@ -0,0 +1,129 @@ +//$$CDS-header$$ + +#ifndef __CDS_MEMORY_POOL_ALLOCATOR_H +#define __CDS_MEMORY_POOL_ALLOCATOR_H + +#include +#include + +namespace cds { namespace memory { + + ///@defgroup cds_memory_pool Simple memory pool + + /// Pool allocator adapter + /** + This class is an adapter for an object pool. It gives \p std::allocator interface + for the @ref cds_memory_pool "pool". + + Template arguments: + - \p T - value type + - \p Accessor - a functor to access to pool object. The pool has the following interface: + \code + template + class pool { + typedef T value_type ; // Object type maintained by pool + T * allocate( size_t n ) ; // Allocate an array of object of type T + void deallocate( T * p, size_t n ) ; // Deallocate the array p of size n + }; + \endcode + + Usage + + Suppose, we have got a pool with interface above. Usually, the pool is a static object: + \code + static pool thePool; + \endcode + + The \p %pool_allocator gives \p std::allocator interface for the pool. + It is needed to declare an accessor functor to access to \p thePool: + \code + struct pool_accessor { + typedef typename pool::value_type value_type; + + pool& operator()() const + { + return thePool; + } + }; + \endcode + + Now, cds::memory::pool_allocator< T, pool_accessor > can be used instead of \p std::allocator. 
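The accessor pattern described above can be exercised end to end with an invented trivial pool; int_pool and int_pool_accessor below are example names only, and any pool with the documented allocate/deallocate interface would do.

#include <cds/memory/pool_allocator.h>
#include <cstddef>
#include <new>

// A trivial pool with the interface pool_allocator expects (names are invented).
struct int_pool {
    typedef int value_type;
    int * allocate( size_t n )            { return static_cast<int *>( ::operator new( n * sizeof(int) )); }
    void  deallocate( int * p, size_t )   { ::operator delete( p ); }
};

static int_pool theIntPool;

struct int_pool_accessor {
    typedef int_pool::value_type value_type;
    int_pool& operator()() const { return theIntPool; }
};

typedef cds::memory::pool_allocator< int, int_pool_accessor > int_pool_allocator;

void pool_allocator_example()
{
    int_pool_allocator a;
    int * p = a.allocate( 1 );   // forwarded to theIntPool.allocate( 1 )
    a.construct( p, 42 );        // placement-construct the value
    a.destroy( p );
    a.deallocate( p, 1 );        // forwarded to theIntPool.deallocate( p, 1 )
}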
+ */ + template + class pool_allocator + { + //@cond + public: + typedef Accessor accessor_type; + + typedef size_t size_type; + typedef ptrdiff_t difference_type; + typedef T* pointer; + typedef const T* const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef T value_type; + + template struct rebind { + typedef pool_allocator other; + }; + + public: + pool_allocator() CDS_NOEXCEPT + {} + + pool_allocator(const pool_allocator&) CDS_NOEXCEPT + {} + template pool_allocator(const pool_allocator&) CDS_NOEXCEPT + {} + ~pool_allocator() + {} + + pointer address(reference x) const CDS_NOEXCEPT + { + return &x; + } + const_pointer address(const_reference x) const CDS_NOEXCEPT + { + return &x; + } + pointer allocate( size_type n, void const * hint = 0) + { + static_assert( sizeof(value_type) <= sizeof(typename accessor_type::value_type), "Incompatible type" ); + + return reinterpret_cast( accessor_type()().allocate( n )); + } + void deallocate(pointer p, size_type n) CDS_NOEXCEPT + { + accessor_type()().deallocate( reinterpret_cast( p ), n ); + } + size_type max_size() const CDS_NOEXCEPT + { + return size_t(-1) / sizeof(value_type); + } + +# if defined(CDS_MOVE_SEMANTICS_SUPPORT) && defined(CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT) + template + void construct(U* p, Args&&... args) + { + new((void *)p) U( std::forward(args)...); + } +# else + template + void construct(pointer p, Arg const& val ) + { + new((void *) p) value_type(val); + } +# endif + template + void destroy(U* p) + { + p->~U(); + } + //@endcond + }; + +}} // namespace cds::memory + + +#endif // #ifndef __CDS_MEMORY_POOL_ALLOCATOR_H diff --git a/cds/memory/vyukov_queue_pool.h b/cds/memory/vyukov_queue_pool.h new file mode 100644 index 00000000..250e371e --- /dev/null +++ b/cds/memory/vyukov_queue_pool.h @@ -0,0 +1,481 @@ +//$$CDS-header$$ + +#ifndef __CDS_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H +#define __CDS_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H + +#include +#include + +namespace cds { namespace memory { + + /// Free-list based on bounded lock-free queue cds::intrusive::VyukovMPMCCycleQueue + /** @ingroup cds_memory_pool + Template parameters: + - \p T - the type of object maintaining by free-list + - \p Options - the options of cds::intrusive::VyukovMPMCCycleQueue class plus + cds::opt::allocator option. + + \b Internals + + This free-list is very simple. + At construction time, the free-list allocates the array of N items + and stores them into queue, where N is the queue capacity. + When allocating the free-list tries to pop an object from + internal queue i.e. from preallocated pool. If success the popped object is returned. + Otherwise a new one is allocated. When deallocating, the free-list checks whether + the object is from preallocated pool. If so, the object is pushed into queue, otherwise + it is deallocated by using the allocator provided. + The pool can manage more than \p N items but only \p N items is contained in the free-list. + + \b Usage + + \p %vyukov_queue_pool should be used together with \ref pool_allocator. + You should declare an static object of type \p %vyukov_queue_pool, provide + an accessor to that object and use \p pool_allocator as an allocator: + \code + #include + #include + + // Pool of Foo object of size 1024. 
+ typedef cds::memory::vyukov_queue_pool< Foo, cds::opt::buffer< cds::opt::v::static_buffer< Foo, 1024 > > pool_type; + static pool_type thePool; + + struct pool_accessor { + typedef typename pool_type::value_type value_type; + + pool_type& operator()() const + { + return thePool; + } + }; + + // Declare pool allocator + typedef cds::memory::pool_allocator< Foo, pool_accessor > pool_allocator; + + // Use pool_allocator + // Allocate an object + Foo * p = pool_allocator().allocate( 1 ); + + // construct object + new(p) Foo; + + //... + + // Destruct object + p->~Foo(); + + // Deallocate object + pool_allocator().deallocate( p , 1 ); + \endcode + */ + template + class vyukov_queue_pool + { + public: + typedef cds::intrusive::VyukovMPMCCycleQueue< T, CDS_OPTIONS6 > queue_type ; ///< Queue type + + protected: + //@cond + struct default_options: public queue_type::options + { + typedef CDS_DEFAULT_ALLOCATOR allocator; + }; + typedef typename opt::make_options< default_options, CDS_OPTIONS6 >::type options; + //@endcond + + public: + typedef T value_type ; ///< Value type + typedef typename options::allocator::template rebind::other allocator_type ; ///< allocator type + + protected: + //@cond + queue_type m_Queue; + value_type * m_pFirst; + value_type * m_pLast; + //@endcond + + protected: + //@cond + void preallocate_pool() + { + m_pFirst = allocator_type().allocate( m_Queue.capacity() ); + m_pLast = m_pFirst + m_Queue.capacity(); + + for ( value_type * p = m_pFirst; p < m_pLast; ++p ) + CDS_VERIFY( m_Queue.push( *p )) ; // must be true + } + + bool from_pool( value_type * p ) const + { + return m_pFirst <= p && p < m_pLast; + } + //@endcond + + public: + /// Preallocates the pool of object + /** + \p nCapacity argument is the queue capacity. It should be passed + if the queue is based on dynamically-allocated buffer. + See cds::intrusive::VyukovMPMCCycleQueue for explanation. + */ + vyukov_queue_pool( size_t nCapacity = 0 ) + : m_Queue( nCapacity ) + { + preallocate_pool(); + } + + /// Deallocates the pool. + ~vyukov_queue_pool() + { + m_Queue.clear(); + allocator_type().deallocate( m_pFirst, m_Queue.capacity() ); + } + + /// Allocates an object from pool + /** + The pool supports allocation only single object (\p n = 1). + If \p n > 1 the behaviour is undefined. + + If the queue is not empty, the popped value is returned. + Otherwise, a new value allocated. + */ + value_type * allocate( size_t n ) + { + assert( n == 1 ); + + value_type * p = m_Queue.pop(); + if ( p ) { + assert( from_pool(p) ); + return p; + } + + return allocator_type().allocate( n ); + } + + /// Deallocated the object \p p + /** + The pool supports allocation only single object (\p n = 1). + If \p n > 1 the behaviour is undefined. + + If \p p is from preallocated pool, it pushes into the queue. + Otherwise, \p p is deallocated by allocator provided. + */ + void deallocate( value_type * p, size_t n ) + { + assert( n == 1 ); + + if ( p ) { + if ( from_pool( p ) ) + m_Queue.push( *p ); + else + allocator_type().deallocate( p, n ); + } + } + }; + + /// Lazy free-list based on bounded lock-free queue cds::intrusive::VyukovMPMCCycleQueue + /** @ingroup cds_memory_pool + Template parameters: + - \p T - the type of object maintaining by free-list + - \p Options - the options of cds::intrusive::VyukovMPMCCycleQueue class plus + cds::opt::allocator option. + + \b Internals + + This free-list is very simple. + At construction time the pool is empty. + When allocating the free-list tries to pop an object from + internal queue. 
If success the popped object is returned. + Otherwise a new one is allocated. + When deallocating, the free-list tries to push the object into the pool. + If internal queue is full, the object is deallocated by using the allocator provided. + The pool can manage more than \p N items but only \p N items is placed in the free-list. + + \b Usage + + \p %lazy_vyukov_queue_pool should be used together with \ref pool_allocator. + You should declare an static object of type \p %lazy_vyukov_queue_pool, provide + an accessor functor to this object and use \p pool_allocator as an allocator: + \code + #include + #include + + // Pool of Foo object of size 1024. + typedef cds::memory::lazy_vyukov_queue_pool< Foo, cds::opt::buffer< cds::opt::v::dynamic_buffer< Foo > > pool_type; + static pool_type thePool( 1024 ); + + struct pool_accessor { + typedef typename pool_type::value_type value_type; + + pool_type& operator()() const + { + return thePool; + } + }; + + // Declare pool allocator + typedef cds::memory::pool_allocator< Foo, pool_accessor > pool_allocator; + + // Use pool_allocator + // Allocate an object + Foo * p = pool_allocator().allocate( 1 ); + + // construct object + new(p) Foo; + + //... + + // Destruct object + p->~Foo(); + + // Deallocate object + pool_allocator().deallocate( p , 1 ); + \endcode + + */ + template + class lazy_vyukov_queue_pool + { + public: + typedef cds::intrusive::VyukovMPMCCycleQueue< T, CDS_OPTIONS6 > queue_type ; ///< Queue type + + protected: + //@cond + struct default_options: public queue_type::options + { + typedef CDS_DEFAULT_ALLOCATOR allocator; + }; + typedef typename opt::make_options< default_options, CDS_OPTIONS6 >::type options; + //@endcond + + public: + typedef T value_type ; ///< Value type + typedef typename options::allocator::template rebind::other allocator_type ; ///< allocator type + + protected: + //@cond + queue_type m_Queue; + //@endcond + + public: + /// Constructs empty pool + lazy_vyukov_queue_pool( size_t nCapacity = 0 ) + : m_Queue( nCapacity ) + {} + + /// Deallocates all objects from the pool + ~lazy_vyukov_queue_pool() + { + allocator_type a; + while ( !m_Queue.empty() ) { + value_type * p = m_Queue.pop(); + a.deallocate( p, 1 ); + } + } + + /// Allocates an object from pool + /** + The pool supports allocation only single object (\p n = 1). + If \p n > 1 the behaviour is undefined. + + If the queue is not empty, the popped value is returned. + Otherwise, a new value allocated. + */ + value_type * allocate( size_t n ) + { + assert( n == 1 ); + + value_type * p = m_Queue.pop(); + if ( p ) + return p; + + return allocator_type().allocate( n ); + } + + /// Deallocated the object \p p + /** + The pool supports allocation only single object (\p n = 1). + If \p n > 1 the behaviour is undefined. + + If the queue is not full, \p p is pushed into the queue. + Otherwise, \p p is deallocated by allocator provided. + */ + void deallocate( value_type * p, size_t n ) + { + assert( n == 1 ); + + if ( p ) { + if ( !m_Queue.push( *p )) + allocator_type().deallocate( p, n ); + } + } + + }; + + /// Bounded free-list based on bounded lock-free queue cds::intrusive::VyukovMPMCCycleQueue + /** @ingroup cds_memory_pool + Template parameters: + - \p T - the type of object maintaining by free-list + - \p Options - the options of cds::intrusive::VyukovMPMCCycleQueue class plus + cds::opt::allocator option. + + \b Internals + + At construction time, the free-list allocates the array of N items + and stores them into queue, where N is the queue capacity. 
+ When allocating the free-list tries to pop an object from + internal queue i.e. from preallocated pool. If success the popped object is returned. + Otherwise a \p std::bad_alloc exception is raised. + So, the pool can contain up to \p N items. + When deallocating, the object is pushed into queue. + In debug mode the \ref deallocate member function asserts + that the pointer is from preallocated pool. + + \b Usage + + \p %bounded_vyukov_queue_pool should be used together with \ref pool_allocator. + You should declare an static object of type \p %bounded_vyukov_queue_pool, provide + an accessor functor to this object and use \p pool_allocator as an allocator: + \code + #include + #include + + // Pool of Foo object of size 1024. + typedef cds::memory::bounded_vyukov_queue_pool< Foo, cds::opt::buffer< cds::opt::v::static_buffer< Foo, 1024 > > pool_type; + static pool_type thePool; + + struct pool_accessor { + typedef typename pool_type::value_type value_type; + + pool_type& operator()() const + { + return thePool; + } + }; + + // Declare pool allocator + typedef cds::memory::pool_allocator< Foo, pool_accessor > pool_allocator; + + // Use pool_allocator + // Allocate an object + Foo * p = pool_allocator().allocate( 1 ); + + // construct object + new(p) Foo; + + //... + + // Destruct object + p->~Foo(); + + // Deallocate object + pool_allocator().deallocate( p , 1 ); + \endcode + */ + template + class bounded_vyukov_queue_pool + { + public: + typedef cds::intrusive::VyukovMPMCCycleQueue< T, CDS_OPTIONS6 > queue_type ; ///< Queue type + + protected: + //@cond + struct default_options: public queue_type::options + { + typedef CDS_DEFAULT_ALLOCATOR allocator; + }; + typedef typename opt::make_options< default_options, CDS_OPTIONS6 >::type options; + //@endcond + + public: + typedef T value_type ; ///< Value type + typedef typename options::allocator::template rebind::other allocator_type ; ///< allocator type + + protected: + //@cond + queue_type m_Queue; + value_type * m_pFirst; + value_type * m_pLast; + //@endcond + + protected: + //@cond + void preallocate_pool() + { + m_pFirst = allocator_type().allocate( m_Queue.capacity() ); + m_pLast = m_pFirst + m_Queue.capacity(); + + for ( value_type * p = m_pFirst; p < m_pLast; ++p ) + CDS_VERIFY( m_Queue.push( *p )) ; // must be true + } + + bool from_pool( value_type * p ) const + { + return m_pFirst <= p && p < m_pLast; + } + //@endcond + + public: + /// Preallocates the pool of object + /** + \p nCapacity argument is the queue capacity. It should be passed + if the queue is based on dynamically-allocated buffer. + See cds::intrusive::VyukovMPMCCycleQueue for explanation. + */ + bounded_vyukov_queue_pool( size_t nCapacity = 0 ) + : m_Queue( nCapacity ) + { + preallocate_pool(); + } + + /// Deallocates the pool. + ~bounded_vyukov_queue_pool() + { + m_Queue.clear(); + allocator_type().deallocate( m_pFirst, m_Queue.capacity() ); + } + + /// Allocates an object from pool + /** + The pool supports allocation only single object (\p n = 1). + If \p n > 1 the behaviour is undefined. + + If the queue is not empty, the popped value is returned. + Otherwise, a \p std::bad_alloc exception is raised. + */ + value_type * allocate( size_t n ) + { + assert( n == 1 ); + + value_type * p = m_Queue.pop(); + if ( p ) { + assert( from_pool(p) ); + return p; + } + + throw std::bad_alloc(); + } + + /// Deallocated the object \p p + /** + The pool supports allocation only single object (\p n = 1). + If \p n > 1 the behaviour is undefined. 
+ + \p should be from preallocated pool. + */ + void deallocate( value_type * p, size_t n ) + { + assert( n == 1 ); + + if ( p ) { + assert( from_pool( p )); + m_Queue.push( *p ); + } + } + }; + + +}} // namespace cds::memory + + +#endif // #ifndef __CDS_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H diff --git a/cds/numtraits.h b/cds/numtraits.h new file mode 100644 index 00000000..89f5aa76 --- /dev/null +++ b/cds/numtraits.h @@ -0,0 +1,247 @@ +//$$CDS-header$$ + +#ifndef __CDS_NUMERIC_TRAITS_H +#define __CDS_NUMERIC_TRAITS_H + +/* + Filename: numtraits.h + Created 2007.04.22 by Maxim.Khiszinsky + + Description: + Various numeric constants and algorithms + Many algorithms are static (compile-time) + Result of static algorithm is the constant (enum) called "result". + + Editions: + 2007.04.22 Maxim.Khiszinsky Created + 2007.07.20 Maxim.Khiszinsky Added functions: exponent2, exp2Ceil +*/ + +namespace cds { + /// Some helper compile-time tricks + namespace beans { + + // @cond details + namespace details { + template struct Exponent2Helper; + template struct Exponent2Helper< N, 0 > { + enum { result = Exponent2Helper< N / 2, N % 2 >::result + 1 }; + }; + template <> struct Exponent2Helper< 1, 0 > { + enum { result = 0 }; + }; + } + // @endcond + + /*! Compile-time computing of log2(N) + + If N = 2**k for some natural k then Exponent2::result = k + If N != 2**k for any natural k then compile-time error has been encountered + */ + template struct Exponent2 { + enum { + native = N, + base = 2, + result = details::Exponent2Helper< N / 2, N % 2 >::result + 1 + }; + }; + //@cond details + template <> struct Exponent2<1> { + enum { + native = 1, + base = 2, + result = 0 + }; + }; + //@endcond + + //TODO - deprecated. Use is_power2 from int_algo.h + /// A tricky runtime algorithm to ensure that @p n is power of 2 + static inline bool isExp2( size_t n ) + { + return(n & (n - 1)) == 0 && n; + } + + //TODO: deprecated. Use log2 from int_algo.h + /// Runtime algorithm to compute log2( @p nTest ). If @p nTest is not power of two then -1 returns + static inline int exponent2( size_t nTest ) + { + int nExp = -1; + size_t nMask = 1; + for ( size_t n = 0; n < CDS_BUILD_BITS; n++ ) { + if ( nTest & nMask ) { + if ( nExp == -1 ) + nExp = (int) n; + else + return -1 ; // nTest íå ÿâëÿåòñÿ ñòåïåíüþ äâîéêè + } + nMask = nMask << 1; + } + return nExp; + } + + /// Returns @a N: 2**N is nearest to @p nNumber, 2**N < nNumber + static inline size_t exp2Ceil( size_t nNumber ) + { + static_assert( sizeof(size_t) == (CDS_BUILD_BITS / 8), "Internal assumption error" ); + + size_t nExp = 0; + size_t nBit = CDS_BUILD_BITS - 1; +#if CDS_BUILD_BITS == 32 + size_t nMask = 0x80000000; +#else + size_t nMask = 0x8000000000000000; +#endif + while ( nMask != 0 ) { + if ( nNumber & nMask ) { + nExp = nBit; + break; + } + nMask = nMask >> 1; + --nBit; + } + if ( ( nNumber % ( ((size_t) 1) << nExp )) > ( ((size_t) 1) << (nExp - 1)) ) + ++nExp; + return nExp; + } + + /* ExponentN< int BASE, int N > + Exponent + If N = BASE**k then the algorithm returns k + Else compile-time error is encountered + */ + //@cond details + namespace details { + template struct ExponentNHelper; + template struct ExponentNHelper< N, BASE, 0 > { + enum { result = ExponentNHelper< N / BASE, BASE, N % BASE >::result + 1 }; + }; + template struct ExponentNHelper< 1, BASE, 0 > { + enum { result = 0 }; + }; + } + //@endcond + + /// Compile-time computing log(@p N) based @p BASE. 
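A few concrete uses of the compile-time and runtime helpers defined above, as a quick sanity check; the include path follows this patch's layout.

#include <cds/numtraits.h>

// 2**6 == 64, so the compile-time log2 yields 6.
static_assert( cds::beans::Exponent2<64>::result == 6, "log2(64) == 6" );

void numtraits_examples()
{
    bool ok  = cds::beans::isExp2( 1024 );      // true: 1024 is a power of two
    int  exp = cds::beans::exponent2( 1024 );   // 10
    int  bad = cds::beans::exponent2( 1000 );   // -1: not a power of two
    (void) ok; (void) exp; (void) bad;
}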
Result in @a Exponent::result + template struct ExponentN { + enum { + native = N, + base = BASE, + result = details::ExponentNHelper< N / BASE, BASE, N % BASE >::result + 1 + }; + }; + //@cond + template struct ExponentN< BASE, 1 > { + enum { + native = 1, + base = BASE, + result = 0 + }; + }; + template struct ExponentN< BASE, 0 >; + //@endcond + + //@cond none + template struct Power2 { + enum { + exponent = N, + result = 1 << N + }; + }; + template <> struct Power2<0> { + enum { + exponent = 0, + result = 1 + }; + }; + //@endcond + + //@cond none + template struct PowerN { + enum { + exponent = N, + base = BASE, + result = PowerN< BASE, N - 1 >::result * BASE + }; + }; + template struct PowerN { + enum { + exponent = 0, + base = BASE, + result = 1 + }; + }; + //@endcond + + //@cond none + namespace details { + template struct NearestCeilHelper { + enum { result = N + ALIGN - MOD }; + }; + template struct NearestCeilHelper< N, ALIGN, 0> { + enum { result = N }; + }; + } + template struct NearestCeil { + enum { + native = N, + align = ALIGN, + result = details::NearestCeilHelper< N, ALIGN, N % ALIGN >::result + }; + }; + //@endcond + + //@cond none + template struct AlignedSize { + typedef T NativeType; + enum { + nativeSize = sizeof(T), + result = NearestCeil< sizeof(T), ALIGN >::result, + alignBytes = result - nativeSize, + alignedSize = result + }; + }; + //@endcond + + //@cond none + namespace details { + template < int N1, int N2, bool LESS > struct Max; + template < int N1, int N2 > + struct Max< N1, N2, true > { + enum { result = N2 }; + }; + + template < int N1, int N2 > + struct Max< N1, N2, false > { + enum { result = N1 }; + }; + + template < int N1, int N2, bool LESS > struct Min; + template < int N1, int N2 > + struct Min< N1, N2, true > { + enum { result = N1 }; + }; + + template < int N1, int N2 > + struct Min< N1, N2, false > { + enum { result = N2 }; + }; + } + //@endcond + + /// Returns max(N1, N2) as Max::result + template + struct Max { + enum { result = details::Max< N1, N2, N1 < N2 >::result }; + }; + + /// Returns min(N1, N2) as Min::result + template + struct Min { + enum { result = details::Min< N1, N2, N1 < N2 >::result }; + }; + + } // namespace beans +} // namespace cds + +#endif // __CDS_NUMERIC_TRAITS_H diff --git a/cds/opt/buffer.h b/cds/opt/buffer.h new file mode 100644 index 00000000..fe1d0815 --- /dev/null +++ b/cds/opt/buffer.h @@ -0,0 +1,243 @@ +//$$CDS-header$$ + +#ifndef __CDS_OPT_BUFFER_H +#define __CDS_OPT_BUFFER_H + +#include +#include +#include +#include + +namespace cds { namespace opt { + + /// [type-option] Option setter for user-provided plain buffer + /** + This option is used by some container as a random access array for storing + container's item; for example, a bounded queue may use + this option to define underlying buffer implementation. + + The template parameter \p Type should be rebindable. + + Implementations: + - opt::v::static_buffer + - opt::v::dynamic_buffer + */ + template + struct buffer { + //@cond + template struct pack: public Base + { + typedef Type buffer; + }; + //@endcond + }; + + namespace v { + + /// Static buffer (\ref opt::buffer option) + /** + One of available type for opt::buffer type-option. + + This buffer maintains static array. No dynamic memory allocation performed. + + \par Template parameters: + - \p T - item type the buffer stores + - \p Capacity - the capacity of buffer. The value must be power of two if \p Exp2 is \p true + - \p Exp2 - a boolean flag. 
If it is \p true the buffer capacity must be power of two. + Otherwise it can be any positive number. Usually, it is required that the buffer has + size of a power of two. + */ + template + class static_buffer + { + public: + typedef T value_type ; ///< value type + static const size_t c_nCapacity = Capacity ; ///< Capacity + static const bool c_bExp2 = Exp2; ///< \p Exp2 flag + + /// Rebind buffer for other template parameters + template + struct rebind { + typedef static_buffer other ; ///< Rebind result type + }; + private: + //@cond + value_type m_buffer[c_nCapacity]; + //@endcond + public: + /// Construct static buffer + static_buffer() + { + // Capacity must be power of 2 + static_assert( !c_bExp2 || (c_nCapacity & (c_nCapacity - 1)) == 0, "Capacity must be power of two" ); + } + /// Construct buffer of given capacity + /** + This ctor ignores \p nCapacity argument. The capacity of static buffer + is defined by template argument \p Capacity + */ + static_buffer( size_t nCapacity ) + { + // Capacity must be power of 2 + static_assert( !c_bExp2 || (c_nCapacity & (c_nCapacity - 1)) == 0, "Capacity must be power of two"); + //assert( c_nCapacity == nCapacity ); + } + + /// Get item \p i + value_type& operator []( size_t i ) + { + assert( i < capacity() ); + return m_buffer[i]; + } + + /// Get item \p i, const version + const value_type& operator []( size_t i ) const + { + assert( i < capacity() ); + return m_buffer[i]; + } + + /// Returns buffer capacity + CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT + { + return c_nCapacity; + } + + /// Zeroize the buffer + void zeroize() + { + memset( m_buffer, 0, capacity() * sizeof(m_buffer[0]) ); + } + + /// Returns pointer to buffer array + value_type * buffer() + { + return m_buffer; + } + + /// Returns pointer to buffer array (const version) + value_type * buffer() const + { + return m_buffer; + } + + private: + //@cond + // non-copyable + static_buffer(const static_buffer&); + void operator =(const static_buffer&); + //@endcond + }; + + + /// Dynamically allocated buffer + /** + One of available opt::buffer type-option. + + This buffer maintains dynamically allocated array. + Allocation is performed at construction time. + + \par Template parameters: + - \p T - item type storing in the buffer + - \p Alloc - an allocator used for allocating internal buffer (\p std::allocator interface) + - \p Exp2 - a boolean flag. If it is \p true the buffer capacity must be power of two. + Otherwise it can be any positive number. Usually, it is required that the buffer has + size of a power of two. + */ + template + class dynamic_buffer + { + public: + typedef T value_type ; ///< Value type + static CDS_CONSTEXPR_CONST bool c_bExp2 = Exp2; ///< \p Exp2 flag + + /// Rebind buffer for other template parameters + template + struct rebind { + typedef dynamic_buffer other ; ///< Rebinding result type + }; + + //@cond + typedef cds::details::Allocator allocator_type; + //@endcond + + private: + //@cond + value_type * m_buffer; + size_t const m_nCapacity; + //@endcond + public: + /// Allocates dynamic buffer of given \p nCapacity + /** + If \p Exp2 class template parameter is \p true then actual capacity + of allocating buffer is nearest upper to \p nCapacity power of two. + */ + dynamic_buffer( size_t nCapacity ) + : m_nCapacity( c_bExp2 ? 
beans::ceil2(nCapacity) : nCapacity ) + { + assert( m_nCapacity >= 2 ); + // Capacity must be power of 2 + assert( !c_bExp2 || (m_nCapacity & (m_nCapacity - 1)) == 0 ); + + allocator_type a; + m_buffer = a.NewArray( m_nCapacity ); + } + + /// Destroys dynamically allocated buffer + ~dynamic_buffer() + { + allocator_type a; + a.Delete( m_buffer, m_nCapacity ); + } + + /// Get item \p i + value_type& operator []( size_t i ) + { + assert( i < capacity() ); + return m_buffer[i]; + } + + /// Get item \p i, const version + const value_type& operator []( size_t i ) const + { + assert( i < capacity() ); + return m_buffer[i]; + } + + /// Returns buffer capacity + size_t capacity() const CDS_NOEXCEPT + { + return m_nCapacity; + } + + /// Zeroize the buffer + void zeroize() + { + memset( m_buffer, 0, capacity() * sizeof(m_buffer[0]) ); + } + + /// Returns pointer to buffer array + value_type * buffer() + { + return m_buffer; + } + + /// Returns pointer to buffer array (const version) + value_type * buffer() const + { + return m_buffer; + } + + private: + //@cond + // non-copyable + dynamic_buffer(const dynamic_buffer&); + void operator =(const dynamic_buffer&); + //@endcond + }; + + } // namespace v + +}} // namespace cds::opt + +#endif // #ifndef __CDS_OPT_BUFFER_H diff --git a/cds/opt/compare.h b/cds/opt/compare.h new file mode 100644 index 00000000..f18940f3 --- /dev/null +++ b/cds/opt/compare.h @@ -0,0 +1,264 @@ +//$$CDS-header$$ + +#ifndef __CDS_OPT_COMPARE_H +#define __CDS_OPT_COMPARE_H + +/* + Editions: + 2011.05.05 khizmax Created +*/ + +#include + +#include +#include +#include + +namespace cds { namespace opt { + + /// [type-option] Option setter for key comparing + /** + The option sets a type of a functor to compare keys. + For comparing two keys \p k1 and \p k2 the functor must return: + - 1 if k1 > k2 + - 0 if k1 == k2 + - -1 if k1 < k2 + + \p Functor is a functor with following interface: + \code + template + struct Comparator { + int operator ()(const T& r1, const T& r2) + { + // Comparator body + } + }; + \endcode + Note that the functor must return \p int, not a \p bool value. + + There are predefined type for \p Functor: + - the functor v::less_comparator that implements comparing functor through \p std::less predicate. + - the specialization of v::less_comparator functor intended for the string comparison + + You may implement your own comparing functor that satisfies \p Functor interface. + + About relation between \ref opt::less and \ref opt::compare option setters see opt::less description. + */ + template + struct compare { + //@cond + template struct pack: public Base + { + typedef Functor compare; + }; + //@endcond + }; + + namespace v { + + /// Comparator based on \p std::less predicate + /** + This functor is predefined type for \p opt::compare option setter. + It is based on \p std::less predicate. + */ + template + struct less_comparator { + /// Operator that compares two value of type \p T + int operator()(T const& v1, T const& v2) + { + if ( std::less()( v1, v2 ) ) + return -1; + if ( std::less()( v2, v1 )) + return 1; + return 0; + } + }; + + /// Comparator specialization for \p std::string + /** + This functor uses \p std::string::compare method instead of \p std::less predicate. 
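+
+            A minimal illustration (not part of the original interface documentation, just a usage sketch):
+            \code
+            cds::opt::v::less_comparator< std::string > cmp;
+            int r = cmp( std::string("abc"), std::string("abd") );
+            // r < 0: the result of std::string::compare() is forwarded as is,
+            // so any negative/zero/positive value may be returned, not necessarily -1/0/1
+            \endcode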
+ */ + template + struct less_comparator< std::basic_string > + { + //@cond + typedef std::basic_string string_type; + int operator()(string_type const& v1, string_type const& v2) + { + return v1.compare( v2 ); + } + //@endcond + }; + } // namespace v + + /// [type-option] Option setter for \p less predicate + /** + The option sets a binary predicate that tests whether a value of a specified type is less than another value of that type. + \p Functor interface is similar to \p std::less predicate interface. + The standard predicate \p std::less can act as \p Functor: + \code typedef cds::opt::less< std::less< int > > opt_less \endcode + + In addition, the option setter may sets non-standard 2-type predicate (\p std::binary_function): + \code + + struct foo { + int n; + }; + + template + struct pred_less { + bool operator ()( const T& t, const Q& q ) + { return t.n < q ; } + bool operator ()( const Q& q, const T& t ) + { return q < t.n ; } + bool operator ()( const T& t1, const T& t2 ) + { return t1.n < t2.n ; } + bool operator ()( const Q& q1, const Q& q2 ) + { return q1 < q2 ; } + }; + + typedef cds::opt::less< pred_less< foo, int > > opt_less; + \endcode + + Generally, the default type for \p Functor is \p std::less but it depends on the container used. + + \par Relation between \p opt::less and opt::compare option setters + Unless otherwise specified, \p compare option setter has high priority. If opt::compare and opt::less options are specified + for a container, the opt::compare option is used: + \code + // Suppose, hypothetical map_type allows to specify + // cds::opt::less and cds::opt::compare options + + typedef map_type< std::string, int, + cds::opt::compare< cds::opt::v::less_comparator< std::string > >, + cds::opt::less< std::less< std::string > > + > my_map_type; + + // For my_map_type, the cds::opt::compare comparator will be used, + // the cds::opt::less option is ignored without any warnings. + \endcode + */ + template + struct less { + //@cond + template struct pack: public Base + { + typedef Functor less; + }; + //@endcond + }; + + //@cond + namespace details { + template + struct make_comparator_from_less + { + typedef Less less_functor; + + template + int operator ()( T const& t, Q const& q ) const + { + less_functor f; + if ( f( t, q ) ) + return -1; + if ( f( q, t ) ) + return 1; + return 0; + } + }; + + template + struct make_comparator + { + typedef typename Traits::compare compare; + typedef typename Traits::less less; + + typedef typename std::conditional< + std::is_same< compare, opt::none >::value, + typename std::conditional< + std::is_same< less, opt::none >::value, + typename std::conditional< Forced, make_comparator_from_less< std::less >, opt::none >::type, + make_comparator_from_less< less > + >::type, + compare + >::type type; + }; + + template + struct make_comparator_from_option_list + { + struct default_traits { + typedef opt::none compare; + typedef opt::none less; + }; + + typedef typename make_comparator< T, + typename opt::make_options< + typename opt::find_type_traits< default_traits, CDS_OPTIONS >::type + ,CDS_OPTIONS + >::type + >::type type; + }; + } // namespace details + //@endcond + + /// [type-option] Option setter for \p equal_to predicate + /** + The option sets a binary predicate that tests whether a value of a specified type is equal to another value of that type. + \p Functor interface is similar to \p std::equal_to predicate interface. 
+ The standard predicate \p std::equal_to can act as \p Functor: + \code typedef cds::opt::equal_to< std::equal_to< int > > opt_equal_to \endcode + + In addition, the option setter may sets non-standard 2-type (or even N-type) predicate (\p std::binary_function): + \code + + struct foo { + int n; + }; + + template + struct pred_equal_to { + bool operator ()( const T& t, const Q& q ) + { return t.n == q ; } + bool operator ()( const Q& q, const T& t ) + { return q == t.n ; } + bool operator ()( const T& t1, const T& t2 ) + { return t1.n == t2.n ; } + bool operator ()( const Q& q1, const Q& q2 ) + { return q1 == q2 ; } + }; + + typedef cds::opt::equal_to< pred_equal_to< foo, int > > opt_equal_to; + \endcode + + Generally, the default type for \p Functor is \p std::equal_to but it depends on the container used. + */ + template + struct equal_to { + //@cond + template struct pack: public Base + { + typedef Functor equal_to; + }; + //@endcond + }; + + //@cond + namespace details { + template + struct make_equal_to + { + typedef typename Traits::equal_to equal_to; + + typedef typename std::conditional< + std::is_same< equal_to, opt::none >::value, + typename std::conditional< Forced, std::equal_to, opt::none >::type, + equal_to + >::type type; + }; + } + //@endcond + +}} // namespace cds::opt + +#endif // #ifndef __CDS_OPT_COMPARE_H diff --git a/cds/opt/hash.h b/cds/opt/hash.h new file mode 100644 index 00000000..643f52d3 --- /dev/null +++ b/cds/opt/hash.h @@ -0,0 +1,571 @@ +//$$CDS-header$$ + +#ifndef __CDS_OPT_HASH_H +#define __CDS_OPT_HASH_H + +#include +#include +#include + +namespace cds { namespace opt { + + /// [type-option] Option setter for a hash function + /** + This option setter specifies hash functor used in unordered containers. + + The default value of template argument \p Functor is \p cds::opt::v::hash + that is synonym for std::hash implementation of standard library. + If standard C++ library of the compiler you use does not provide TR1 implementation + the \p cds library automatically selects boost::hash. + */ + template + struct hash { + //@cond + template struct pack: public Base + { + typedef Functor hash; + }; + //@endcond + }; + + namespace v { + //@cond + using cds::details::hash; + + /// Metafunction selecting default hash implementation + /** + The metafunction selects appropriate hash functor implementation. + If \p Hash is not equal to opt::none, then result of metafunction is \p Hash. + Otherwise, the result is std::hash or boost::hash + depending of compiler you use. + + Note that default hash function like std::hash or boost::hash + is generally not suitable for complex type \p Q and its derivatives. + You should manually provide particular hash functor for such types. 
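+
+        A sketch of such a hand-written functor (illustration only; \p my_key and the shown
+        hash-combination scheme are hypothetical, not part of the library):
+        \code
+        struct my_key {
+            int         id;
+            std::string name;
+        };
+
+        struct my_key_hash {
+            size_t operator()( my_key const& k ) const
+            {
+                // naive combination of the field hashes, for illustration only
+                return std::hash<int>()( k.id ) ^ ( std::hash<std::string>()( k.name ) << 1 );
+            }
+        };
+
+        // the functor is then passed to an unordered container via the option setter:
+        //      cds::opt::hash< my_key_hash >
+        \endcode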
+ */ + template + struct hash_selector + { + typedef Hash type ; ///< resulting implementation of hash functor + }; + + template <> + struct hash_selector + { + struct type { + template + size_t operator()( Q const& key ) const + { + return hash()( key ); + } + }; + }; + //@endcond + } // namespace v + +#ifdef CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT + //@cond + namespace details { + template struct hash_list; + template + struct hash_list< std::tuple > + { + static size_t const size = sizeof...(Functors); + typedef size_t values[size]; + typedef std::tuple hash_tuple_type; + + hash_tuple_type hash_tuple; + + hash_list() + {} + + hash_list( hash_tuple_type const& t) + : hash_tuple( t ) + {} +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + hash_list( hash_tuple_type&& t) + : hash_tuple( std::forward(t) ) + {} +# endif + + template + typename std::enable_if< (I == sizeof...(Functors)) >::type apply( size_t * dest, T const& v ) const + {} + + template + typename std::enable_if< (I < sizeof...(Functors)) >::type apply( size_t * dest, T const& v ) const + { + dest[I] = std::get( hash_tuple )( v ); + apply( dest, v ); + } + + template + void operator()( size_t * dest, T const& v ) const + { + apply<0>( dest, v ); + } + }; + } // namespace details + //@endcond + + //@cond + // At least, two functors must be provided. Single functor is not supported +//#if CDS_COMPILER != CDS_COMPILER_INTEL + // Intel C++ compiler does not support + template struct hash< std::tuple >; +//#endif + //@endcond + + /// Multi-functor hash option setter - specialization for std::tuple + template + struct hash< std::tuple > + { +//# if CDS_COMPILER == CDS_COMPILER_INTEL + //static_assert( sizeof...(Functors) > 1, "At least, two functors must be provided. Single functor is not supported" ); +//# endif + //@cond + template struct pack: public Base + { + typedef details::hash_list< std::tuple > hash; + }; + //@endcond + }; + +#else // no variadic template support + namespace details { + template struct hash_list; + template + struct hash_list< std::tuple > + { + static size_t const size = 2; + typedef size_t values[size]; + typedef std::tuple hash_tuple_type; + + hash_tuple_type hash_tuple; + + hash_list() + {} + hash_list( hash_tuple_type const& t) + : hash_tuple( t ) + {} +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + hash_list( hash_tuple_type&& t) + : hash_tuple( t ) + {} +# endif + + template + void operator()( size_t * dest, T const& v ) const + { + dest[0] = std::get<0>( hash_tuple )( v ); + dest[1] = std::get<1>( hash_tuple )( v ); + } + }; + + template + struct hash_list< std::tuple > + { + static size_t const size = 3; + typedef size_t values[size]; + typedef std::tuple hash_tuple_type; + + hash_tuple_type hash_tuple; + + hash_list() + {} + hash_list( hash_tuple_type const& t) + : hash_tuple( t ) + {} +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + hash_list( hash_tuple_type&& t) + : hash_tuple( t ) + {} +# endif + + template + void operator()( size_t * dest, T const& v ) const + { + dest[0] = std::get<0>( hash_tuple )( v ); + dest[1] = std::get<1>( hash_tuple )( v ); + dest[2] = std::get<2>( hash_tuple )( v ); + } + }; + + template + struct hash_list< std::tuple > + { + static size_t const size = 4; + typedef size_t values[size]; + typedef std::tuple hash_tuple_type; + + hash_tuple_type hash_tuple; + + hash_list() + {} + hash_list( hash_tuple_type const& t) + : hash_tuple( t ) + {} +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + hash_list( hash_tuple_type&& t) + : hash_tuple( t ) + {} +# endif + + template + void operator()( size_t * dest, T const& v ) 
const + { + dest[0] = std::get<0>( hash_tuple )( v ); + dest[1] = std::get<1>( hash_tuple )( v ); + dest[2] = std::get<2>( hash_tuple )( v ); + dest[3] = std::get<3>( hash_tuple )( v ); + } + }; + + template + struct hash_list< std::tuple > + { + static size_t const size = 5; + typedef size_t values[size]; + typedef std::tuple hash_tuple_type; + + hash_tuple_type hash_tuple; + + hash_list() + {} + hash_list( hash_tuple_type const& t) + : hash_tuple( t ) + {} +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + hash_list( hash_tuple_type&& t) + : hash_tuple( t ) + {} +# endif + + template + void operator()( size_t * dest, T const& v ) const + { + dest[0] = std::get<0>( hash_tuple )( v ); + dest[1] = std::get<1>( hash_tuple )( v ); + dest[2] = std::get<2>( hash_tuple )( v ); + dest[3] = std::get<3>( hash_tuple )( v ); + dest[4] = std::get<4>( hash_tuple )( v ); + } + }; + + template + struct hash_list< std::tuple > + { + static size_t const size = 6; + typedef size_t values[size]; + typedef std::tuple hash_tuple_type; + + hash_tuple_type hash_tuple; + + hash_list() + {} + hash_list( hash_tuple_type const& t) + : hash_tuple( t ) + {} +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + hash_list( hash_tuple_type&& t) + : hash_tuple( t ) + {} +# endif + + template + void operator()( size_t * dest, T const& v ) const + { + dest[0] = std::get<0>( hash_tuple )( v ); + dest[1] = std::get<1>( hash_tuple )( v ); + dest[2] = std::get<2>( hash_tuple )( v ); + dest[3] = std::get<3>( hash_tuple )( v ); + dest[4] = std::get<4>( hash_tuple )( v ); + dest[5] = std::get<5>( hash_tuple )( v ); + } + }; + + template + struct hash_list< std::tuple > + { + static size_t const size = 7; + typedef size_t values[size]; + typedef std::tuple hash_tuple_type; + + hash_tuple_type hash_tuple; + + hash_list() + {} + hash_list( hash_tuple_type const& t) + : hash_tuple( t ) + {} +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + hash_list( hash_tuple_type&& t) + : hash_tuple( t ) + {} +# endif + + template + void operator()( size_t * dest, T const& v ) const + { + dest[0] = std::get<0>( hash_tuple )( v ); + dest[1] = std::get<1>( hash_tuple )( v ); + dest[2] = std::get<2>( hash_tuple )( v ); + dest[3] = std::get<3>( hash_tuple )( v ); + dest[4] = std::get<4>( hash_tuple )( v ); + dest[5] = std::get<5>( hash_tuple )( v ); + dest[6] = std::get<6>( hash_tuple )( v ); + } + }; + + template + struct hash_list< std::tuple > + { + static size_t const size = 8; + typedef size_t values[size]; + typedef std::tuple hash_tuple_type; + + hash_tuple_type hash_tuple; + + hash_list() + {} + hash_list( hash_tuple_type const& t) + : hash_tuple( t ) + {} +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + hash_list( hash_tuple_type&& t) + : hash_tuple( t ) + {} +# endif + + template + void operator()( size_t * dest, T const& v ) const + { + dest[0] = std::get<0>( hash_tuple )( v ); + dest[1] = std::get<1>( hash_tuple )( v ); + dest[2] = std::get<2>( hash_tuple )( v ); + dest[3] = std::get<3>( hash_tuple )( v ); + dest[4] = std::get<4>( hash_tuple )( v ); + dest[5] = std::get<5>( hash_tuple )( v ); + dest[6] = std::get<6>( hash_tuple )( v ); + dest[7] = std::get<7>( hash_tuple )( v ); + } + }; + +#if !((CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL) && _MSC_VER == 1700) + // MSVC 11: max count of argument is 8 + + template + struct hash_list< std::tuple > + { + static size_t const size = 9; + typedef size_t values[size]; + typedef std::tuple hash_tuple_type; + + hash_tuple_type hash_tuple; + + hash_list() + {} + hash_list( hash_tuple_type const& t) + : hash_tuple( t ) + 
{} +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + hash_list( hash_tuple_type&& t) + : hash_tuple( t ) + {} +# endif + + template + void operator()( size_t * dest, T const& v ) const + { + dest[0] = std::get<0>( hash_tuple )( v ); + dest[1] = std::get<1>( hash_tuple )( v ); + dest[2] = std::get<2>( hash_tuple )( v ); + dest[3] = std::get<3>( hash_tuple )( v ); + dest[4] = std::get<4>( hash_tuple )( v ); + dest[5] = std::get<5>( hash_tuple )( v ); + dest[6] = std::get<6>( hash_tuple )( v ); + dest[7] = std::get<7>( hash_tuple )( v ); + dest[8] = std::get<8>( hash_tuple )( v ); + } + }; + + template + struct hash_list< std::tuple > + { + static size_t const size = 10; + typedef size_t values[size]; + typedef std::tuple hash_tuple_type; + + hash_tuple_type hash_tuple; + + hash_list() + {} + hash_list( hash_tuple_type const& t) + : hash_tuple( t ) + {} +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + hash_list( hash_tuple_type&& t) + : hash_tuple( t ) + {} +# endif + + template + void operator()( size_t * dest, T const& v ) const + { + dest[0] = std::get<0>( hash_tuple )( v ); + dest[1] = std::get<1>( hash_tuple )( v ); + dest[2] = std::get<2>( hash_tuple )( v ); + dest[3] = std::get<3>( hash_tuple )( v ); + dest[4] = std::get<4>( hash_tuple )( v ); + dest[5] = std::get<5>( hash_tuple )( v ); + dest[6] = std::get<6>( hash_tuple )( v ); + dest[7] = std::get<7>( hash_tuple )( v ); + dest[8] = std::get<8>( hash_tuple )( v ); + dest[9] = std::get<9>( hash_tuple )( v ); + } + }; +#endif + } // namespace details + + template< typename F1, typename F2 > + struct hash< std::tuple< F1, F2 > > + { + //@cond + template struct pack: public Base + { + typedef details::hash_list< std::tuple > hash; + }; + //@endcond + }; + template< typename F1, typename F2, typename F3 > + struct hash< std::tuple< F1, F2, F3 > > + { + //@cond + template struct pack: public Base + { + typedef details::hash_list< std::tuple > hash; + }; + //@endcond + }; + template< typename F1, typename F2, typename F3, typename F4 > + struct hash< std::tuple< F1, F2, F3, F4 > > + { + //@cond + template struct pack: public Base + { + typedef details::hash_list< std::tuple > hash; + }; + //@endcond + }; + template< typename F1, typename F2, typename F3, typename F4, typename F5 > + struct hash< std::tuple< F1, F2, F3, F4, F5 > > + { + //@cond + template struct pack: public Base + { + typedef details::hash_list< std::tuple > hash; + }; + //@endcond + }; + template< typename F1, typename F2, typename F3, typename F4, typename F5, typename F6 > + struct hash< std::tuple< F1, F2, F3, F4, F5, F6 > > + { + //@cond + template struct pack: public Base + { + typedef details::hash_list< std::tuple > hash; + }; + //@endcond + }; + template< typename F1, typename F2, typename F3, typename F4, typename F5, typename F6, typename F7 > + struct hash< std::tuple< F1, F2, F3, F4, F5, F6, F7 > > + { + //@cond + template struct pack: public Base + { + typedef details::hash_list< std::tuple > hash; + }; + //@endcond + }; + template< typename F1, typename F2, typename F3, typename F4, typename F5, typename F6, typename F7, typename F8 > + struct hash< std::tuple< F1, F2, F3, F4, F5, F6, F7, F8 > > + { + //@cond + template struct pack: public Base + { + typedef details::hash_list< std::tuple > hash; + }; + //@endcond + }; + +#if !((CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL) && _MSC_VER == 1700) + // MSVC 11: max count of argument is 8 + + template< typename F1, typename F2, typename F3, typename F4, typename F5, typename F6, typename F7, typename F8, 
typename F9 > + struct hash< std::tuple< F1, F2, F3, F4, F5, F6, F7, F8, F9 > > + { + //@cond + template struct pack: public Base + { + typedef details::hash_list< std::tuple > hash; + }; + //@endcond + }; + template< typename F1, typename F2, typename F3, typename F4, typename F5, typename F6, typename F7, typename F8, typename F9, + typename F10 > + struct hash< std::tuple< F1, F2, F3, F4, F5, F6, F7, F8, F9, F10 > > + { + //@cond + template struct pack: public Base + { + typedef details::hash_list< std::tuple > hash; + }; + //@endcond + }; +#endif // !MSVC11 +#endif // #ifdef CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT + + //@cond + namespace details { + + template + struct hash_list_wrapper { + typedef HashList hash_list; + typedef WrappedType wrapped_type; + typedef Wrapper wrapper_type; + + typedef typename hash_list::hash_tuple_type hash_tuple_type; + static size_t const size = hash_list::size; + + hash_list m_wrappedList; + + hash_list_wrapper() + {} + hash_list_wrapper( hash_tuple_type const& t) + : m_wrappedList( t ) + {} +# ifdef CDS_MOVE_SEMANTICS_SUPPORT + hash_list_wrapper( hash_tuple_type&& t) + : m_wrappedList( std::forward(t) ) + {} +# endif + + void operator()( size_t * dest, wrapped_type const& what ) const + { + m_wrappedList( dest, wrapper_type()( what )); + } + + template + void operator()( size_t * dest, Q const& what) const + { + m_wrappedList( dest, what ); + } + }; + + } // namespace details + //@endcond + +}} // namespace cds::opt + +#endif // #ifndef __CDS_OPT_HASH_H diff --git a/cds/opt/make_options_std.h b/cds/opt/make_options_std.h new file mode 100644 index 00000000..77260d31 --- /dev/null +++ b/cds/opt/make_options_std.h @@ -0,0 +1,535 @@ +//$$CDS-header$$ + +#ifndef __CDS_OPT_MAKE_OPTIONS_STD_H +#define __CDS_OPT_MAKE_OPTIONS_STD_H + +#ifndef __CDS_OPT_OPTIONS_H +# error must be included instead of +#endif + +#include + +// @cond + +#define CDS_DECL_OPTIONS1 typename O1=cds::opt::none +#define CDS_DECL_OPTIONS2 CDS_DECL_OPTIONS1,typename O2=cds::opt::none +#define CDS_DECL_OPTIONS3 CDS_DECL_OPTIONS2,typename O3=cds::opt::none +#define CDS_DECL_OPTIONS4 CDS_DECL_OPTIONS3,typename O4=cds::opt::none +#define CDS_DECL_OPTIONS5 CDS_DECL_OPTIONS4,typename O5=cds::opt::none +#define CDS_DECL_OPTIONS6 CDS_DECL_OPTIONS5,typename O6=cds::opt::none +#define CDS_DECL_OPTIONS7 CDS_DECL_OPTIONS6,typename O7=cds::opt::none +#define CDS_DECL_OPTIONS8 CDS_DECL_OPTIONS7,typename O8=cds::opt::none +#define CDS_DECL_OPTIONS9 CDS_DECL_OPTIONS8,typename O9=cds::opt::none +#define CDS_DECL_OPTIONS10 CDS_DECL_OPTIONS9,typename O10=cds::opt::none +#define CDS_DECL_OPTIONS11 CDS_DECL_OPTIONS10,typename O11=cds::opt::none +#define CDS_DECL_OPTIONS12 CDS_DECL_OPTIONS11,typename O12=cds::opt::none +#define CDS_DECL_OPTIONS13 CDS_DECL_OPTIONS12,typename O13=cds::opt::none +#define CDS_DECL_OPTIONS14 CDS_DECL_OPTIONS13,typename O14=cds::opt::none +#define CDS_DECL_OPTIONS15 CDS_DECL_OPTIONS14,typename O15=cds::opt::none +#define CDS_DECL_OPTIONS16 CDS_DECL_OPTIONS15,typename O16=cds::opt::none + +#define CDS_DECL_OPTIONS CDS_DECL_OPTIONS16 + +#define CDS_DECL_OTHER_OPTIONS1 typename OO1=cds::opt::none +#define CDS_DECL_OTHER_OPTIONS2 CDS_DECL_OTHER_OPTIONS1,typename OO2=cds::opt::none +#define CDS_DECL_OTHER_OPTIONS3 CDS_DECL_OTHER_OPTIONS2,typename OO3=cds::opt::none +#define CDS_DECL_OTHER_OPTIONS4 CDS_DECL_OTHER_OPTIONS3,typename OO4=cds::opt::none +#define CDS_DECL_OTHER_OPTIONS5 CDS_DECL_OTHER_OPTIONS4,typename OO5=cds::opt::none +#define CDS_DECL_OTHER_OPTIONS6 
CDS_DECL_OTHER_OPTIONS5,typename OO6=cds::opt::none +#define CDS_DECL_OTHER_OPTIONS7 CDS_DECL_OTHER_OPTIONS6,typename OO7=cds::opt::none +#define CDS_DECL_OTHER_OPTIONS8 CDS_DECL_OTHER_OPTIONS7,typename OO8=cds::opt::none +#define CDS_DECL_OTHER_OPTIONS9 CDS_DECL_OTHER_OPTIONS8,typename OO9=cds::opt::none +#define CDS_DECL_OTHER_OPTIONS10 CDS_DECL_OTHER_OPTIONS9,typename OO10=cds::opt::none +#define CDS_DECL_OTHER_OPTIONS11 CDS_DECL_OTHER_OPTIONS10,typename OO11=cds::opt::none +#define CDS_DECL_OTHER_OPTIONS12 CDS_DECL_OTHER_OPTIONS11,typename OO12=cds::opt::none +#define CDS_DECL_OTHER_OPTIONS13 CDS_DECL_OTHER_OPTIONS12,typename OO13=cds::opt::none +#define CDS_DECL_OTHER_OPTIONS14 CDS_DECL_OTHER_OPTIONS13,typename OO14=cds::opt::none +#define CDS_DECL_OTHER_OPTIONS15 CDS_DECL_OTHER_OPTIONS14,typename OO15=cds::opt::none +#define CDS_DECL_OTHER_OPTIONS16 CDS_DECL_OTHER_OPTIONS15,typename OO16=cds::opt::none + +// for template specializations +#define CDS_SPEC_OPTIONS1 typename O1 +#define CDS_SPEC_OPTIONS2 CDS_SPEC_OPTIONS1,typename O2 +#define CDS_SPEC_OPTIONS3 CDS_SPEC_OPTIONS2,typename O3 +#define CDS_SPEC_OPTIONS4 CDS_SPEC_OPTIONS3,typename O4 +#define CDS_SPEC_OPTIONS5 CDS_SPEC_OPTIONS4,typename O5 +#define CDS_SPEC_OPTIONS6 CDS_SPEC_OPTIONS5,typename O6 +#define CDS_SPEC_OPTIONS7 CDS_SPEC_OPTIONS6,typename O7 +#define CDS_SPEC_OPTIONS8 CDS_SPEC_OPTIONS7,typename O8 +#define CDS_SPEC_OPTIONS9 CDS_SPEC_OPTIONS8,typename O9 +#define CDS_SPEC_OPTIONS10 CDS_SPEC_OPTIONS9,typename O10 +#define CDS_SPEC_OPTIONS11 CDS_SPEC_OPTIONS10,typename O11 +#define CDS_SPEC_OPTIONS12 CDS_SPEC_OPTIONS11,typename O12 +#define CDS_SPEC_OPTIONS13 CDS_SPEC_OPTIONS12,typename O13 +#define CDS_SPEC_OPTIONS14 CDS_SPEC_OPTIONS13,typename O14 +#define CDS_SPEC_OPTIONS15 CDS_SPEC_OPTIONS14,typename O15 +#define CDS_SPEC_OPTIONS16 CDS_SPEC_OPTIONS15,typename O16 + +#define CDS_SPEC_OPTIONS CDS_SPEC_OPTIONS16 + +#define CDS_OPTIONS1 O1 +#define CDS_OPTIONS2 O1,O2 +#define CDS_OPTIONS3 O1,O2,O3 +#define CDS_OPTIONS4 O1,O2,O3,O4 +#define CDS_OPTIONS5 O1,O2,O3,O4,O5 +#define CDS_OPTIONS6 O1,O2,O3,O4,O5,O6 +#define CDS_OPTIONS7 O1,O2,O3,O4,O5,O6,O7 +#define CDS_OPTIONS8 O1,O2,O3,O4,O5,O6,O7,O8 +#define CDS_OPTIONS9 O1,O2,O3,O4,O5,O6,O7,O8,O9 +#define CDS_OPTIONS10 O1,O2,O3,O4,O5,O6,O7,O8,O9,O10 +#define CDS_OPTIONS11 O1,O2,O3,O4,O5,O6,O7,O8,O9,O10,O11 +#define CDS_OPTIONS12 O1,O2,O3,O4,O5,O6,O7,O8,O9,O10,O11,O12 +#define CDS_OPTIONS13 O1,O2,O3,O4,O5,O6,O7,O8,O9,O10,O11,O12,O13 +#define CDS_OPTIONS14 O1,O2,O3,O4,O5,O6,O7,O8,O9,O10,O11,O12,O13,O14 +#define CDS_OPTIONS15 O1,O2,O3,O4,O5,O6,O7,O8,O9,O10,O11,O12,O13,O14,O15 +#define CDS_OPTIONS16 O1,O2,O3,O4,O5,O6,O7,O8,O9,O10,O11,O12,O13,O14,O15,O16 +//#define CDS_OPTIONS17 O1,O2,O3,O4,O5,O6,O7,O8,O9,O10,O11,O12,O13,O14,O15,O16,O17 +//#define CDS_OPTIONS18 O1,O2,O3,O4,O5,O6,O7,O8,O9,O10,O11,O12,O13,O14,O15,O16,O17,O18 +//#define CDS_OPTIONS19 O1,O2,O3,O4,O5,O6,O7,O8,O9,O10,O11,O12,O13,O14,O15,O16,O17,O18,O19 +//#define CDS_OPTIONS20 O1,O2,O3,O4,O5,O6,O7,O8,O9,O10,O11,O12,O13,O14,O15,O16,O17,O18,O19,O20 + +#define CDS_OPTIONS CDS_OPTIONS16 + +#define CDS_OTHER_OPTIONS1 OO1 +#define CDS_OTHER_OPTIONS2 OO1,OO2 +#define CDS_OTHER_OPTIONS3 OO1,OO2,OO3 +#define CDS_OTHER_OPTIONS4 OO1,OO2,OO3,OO4 +#define CDS_OTHER_OPTIONS5 OO1,OO2,OO3,OO4,OO5 +#define CDS_OTHER_OPTIONS6 OO1,OO2,OO3,OO4,OO5,OO6 +#define CDS_OTHER_OPTIONS7 OO1,OO2,OO3,OO4,OO5,OO6,OO7 +#define CDS_OTHER_OPTIONS8 OO1,OO2,OO3,OO4,OO5,OO6,OO7,OO8 +#define CDS_OTHER_OPTIONS9 OO1,OO2,OO3,OO4,OO5,OO6,OO7,OO8,OO9 +#define 
CDS_OTHER_OPTIONS10 OO1,OO2,OO3,OO4,OO5,OO6,OO7,OO8,OO9,OO10 +#define CDS_OTHER_OPTIONS11 OO1,OO2,OO3,OO4,OO5,OO6,OO7,OO8,OO9,OO10,OO11 +#define CDS_OTHER_OPTIONS12 OO1,OO2,OO3,OO4,OO5,OO6,OO7,OO8,OO9,OO10,OO11,OO12 +#define CDS_OTHER_OPTIONS13 OO1,OO2,OO3,OO4,OO5,OO6,OO7,OO8,OO9,OO10,OO11,OO12,OO13 +#define CDS_OTHER_OPTIONS14 OO1,OO2,OO3,OO4,OO5,OO6,OO7,OO8,OO9,OO10,OO11,OO12,OO13,OO14 +#define CDS_OTHER_OPTIONS15 OO1,OO2,OO3,OO4,OO5,OO6,OO7,OO8,OO9,OO10,OO11,OO12,OO13,OO14,OO15 +#define CDS_OTHER_OPTIONS16 OO1,OO2,OO3,OO4,OO5,OO6,OO7,OO8,OO9,OO10,OO11,OO12,OO13,OO14,OO15,OO16 + +namespace cds { namespace opt { + + template + struct do_pack + { + // Use "pack" member template to pack options + typedef typename Option::template pack type; + }; + + template < + typename DefaultOptions + ,typename O1 = none + ,typename O2 = none + ,typename O3 = none + ,typename O4 = none + ,typename O5 = none + ,typename O6 = none + ,typename O7 = none + ,typename O8 = none + ,typename O9 = none + ,typename O10 = none + ,typename O11 = none + ,typename O12 = none + ,typename O13 = none + ,typename O14 = none + ,typename O15 = none + ,typename O16 = none + > + struct make_options { + /* + typedef + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + DefaultOptions + ,O16 + >::type + ,O15 + >::type + ,O14 + >::type + ,O13 + >::type + ,O12 + >::type + ,O11 + >::type + ,O10 + >::type + ,O9 + >::type + ,O8 + >::type + ,O7 + >::type + ,O6 + >::type + ,O5 + >::type + ,O4 + >::type + ,O3 + >::type + ,O2 + >::type + ,O1 + >::type + type; + */ + struct type: public + do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + typename do_pack< + DefaultOptions + ,O16 + >::type + ,O15 + >::type + ,O14 + >::type + ,O13 + >::type + ,O12 + >::type + ,O11 + >::type + ,O10 + >::type + ,O9 + >::type + ,O8 + >::type + ,O7 + >::type + ,O6 + >::type + ,O5 + >::type + ,O4 + >::type + ,O3 + >::type + ,O2 + >::type + ,O1 + >::type + {}; + }; + + + // ***************************************************************** + // find_type_traits metafunction + // ***************************************************************** + + namespace details { + template + struct find_type_traits_option { + typedef DefaultOptions type; + }; + + template + struct find_type_traits_option< cds::opt::type_traits, DefaultOptions> { + typedef T type; + }; + } + + template < + typename DefaultOptions + ,typename O1 = none + ,typename O2 = none + ,typename O3 = none + ,typename O4 = none + ,typename O5 = none + ,typename O6 = none + ,typename O7 = none + ,typename O8 = none + ,typename O9 = none + ,typename O10 = none + ,typename O11 = none + ,typename O12 = none + ,typename O13 = none + ,typename O14 = none + ,typename O15 = none + ,typename O16 = none + > + struct find_type_traits { + /* + typedef typename details::find_type_traits_option< O1, + typename details::find_type_traits_option< O2, + typename details::find_type_traits_option< O3, + typename details::find_type_traits_option< O4, + typename details::find_type_traits_option< O5, + typename 
details::find_type_traits_option< O6, + typename details::find_type_traits_option< O7, + typename details::find_type_traits_option< O8, + typename details::find_type_traits_option< O9, + typename details::find_type_traits_option< O10, + typename details::find_type_traits_option< O11, + typename details::find_type_traits_option< O12, + typename details::find_type_traits_option< O13, + typename details::find_type_traits_option< O14, + typename details::find_type_traits_option< O15, + typename details::find_type_traits_option< O16, DefaultOptions>::type + >::type + >::type + >::type + >::type + >::type + >::type + >::type + >::type + >::type + >::type + >::type + >::type + >::type + >::type + >::type type; + */ + struct type: public details::find_type_traits_option< O1, + typename details::find_type_traits_option< O2, + typename details::find_type_traits_option< O3, + typename details::find_type_traits_option< O4, + typename details::find_type_traits_option< O5, + typename details::find_type_traits_option< O6, + typename details::find_type_traits_option< O7, + typename details::find_type_traits_option< O8, + typename details::find_type_traits_option< O9, + typename details::find_type_traits_option< O10, + typename details::find_type_traits_option< O11, + typename details::find_type_traits_option< O12, + typename details::find_type_traits_option< O13, + typename details::find_type_traits_option< O14, + typename details::find_type_traits_option< O15, + typename details::find_type_traits_option< O16, DefaultOptions>::type + >::type + >::type + >::type + >::type + >::type + >::type + >::type + >::type + >::type + >::type + >::type + >::type + >::type + >::type + >::type + {}; + }; + + + // ***************************************************************** + // find_option metafunction + // ***************************************************************** + namespace details { + + template + struct select_option + { + typedef void type; + }; + + template